""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # -*- coding:utf-8 -*- """ Person Re-Identification models """ # Version: 0.0.1 # Author: scorpio.lu(<EMAIL>) # Data: 06/28/2020 import oneflow as flow BLOCK_COUNTS = [3, 4, 6, 3] BLOCK_FILTERS = [256, 512, 1024, 2048] BLOCK_FILTERS_INNER = [64, 128, 256, 512] def _conv2d( name, input, filters, kernel_size, strides=1, padding="SAME", data_format="NCHW", dilations=1, trainable=True, weight_initializer=flow.variance_scaling_initializer(data_format="NCHW"), ): weight = flow.get_variable( name + "-weight", shape=(filters, input.shape[1], kernel_size, kernel_size), dtype=input.dtype, initializer=weight_initializer, trainable=trainable, ) return flow.nn.conv2d( input, weight, strides, padding, data_format, dilations, name=name ) def conv2d_affine(input, name, filters, kernel_size, strides, activation=None, trainable=True): """conv2d + batch norm + relu unit""" # input data_format must be NCHW padding = "SAME" if strides > 1 or kernel_size > 1 else "VALID" output = _conv2d(name, input, filters, kernel_size, strides, padding, trainable=trainable) output = _batch_norm(output, name + "_bn", trainable) if activation == "Relu": output = flow.nn.relu(output) return output def bottleneck_transformation(input, block_name, filters, filters_inner, strides, trainable=True): """1*1 conv2d_affine + 3*3 conv2d_affine + 1*1 conv2d_affine""" a = conv2d_affine( input, block_name + "-branch2a", filters_inner, 1, 1, activation="Relu", trainable=trainable ) b = conv2d_affine( a, block_name + "-branch2b", filters_inner, 3, strides, activation="Relu", trainable=trainable ) c = conv2d_affine(b, block_name + "-branch2c", filters, 1, 1, trainable=trainable) return c def _batch_norm(inputs, name=None, trainable=True): """batch normalization""" return flow.layers.batch_normalization( inputs=inputs, axis=1, momentum=0.997, epsilon=1.001e-5, center=True, scale=True, trainable=trainable, name=name, ) def layer0(input, trainable): """conv2d + relu + max pooling""" conv1 = _conv2d("conv1", input, 64, 7, 2, trainable=trainable) conv1_bn = flow.nn.relu(_batch_norm(conv1, "bn1", trainable)) pool1 = flow.nn.max_pool2d( conv1_bn, ksize=3, strides=2, padding="VALID", data_format="NCHW", name="pool1", ) return pool1 def resnet_conv_x_body(input, on_stage_end=lambda x: x, trainable=True): """residual blocks of layers""" output = input for i, (counts, filters, filters_inner) in enumerate( zip(BLOCK_COUNTS, BLOCK_FILTERS, BLOCK_FILTERS_INNER) ): stage_name = "layer%d" % (i + 1) output = residual_stage( output, stage_name, counts, filters, filters_inner, 1 if i == 0 or i == 3 else 2, trainable=trainable ) on_stage_end(output) return output def residual_stage(input, stage_name, counts, filters, filters_inner, stride_init=2, trainable=True): """4 layers""" output = input for i in range(counts): block_name = "%s-%d" % (stage_name, i) output = residual_block( output, block_name, filters, filters_inner, stride_init if i == 0 else 1, trainable=trainable ) return output def 
residual_block(input, block_name, filters, filters_inner, strides_init, trainable): """a residual block""" if strides_init != 1 or block_name == "layer1-0" or block_name == "layer4-0": shortcut = conv2d_affine( input, block_name + '-downsample', filters, 1, strides_init, trainable=trainable ) else: shortcut = input bottleneck = bottleneck_transformation( input, block_name, filters, filters_inner, strides_init, trainable=trainable ) return flow.nn.relu(bottleneck + shortcut) def resreid_train(images, num_class=751, trainable=True): """use resnet50 as backbone, modify the stride of last layer to be 1 for rich person features """ with flow.scope.namespace("base"): stem = layer0(images, trainable=trainable) body = resnet_conv_x_body(stem, lambda x: x, trainable=trainable) with flow.scope.namespace("gap"): pool5 = flow.nn.avg_pool2d(body, ksize=[16, 8], strides=1, padding="VALID", data_format="NCHW", name="pool5") feature = flow.reshape(pool5, [pool5.shape[0], -1]) if not trainable: return feature bn1 = flow.layers.batch_normalization( feature, axis=1, center=False, beta_initializer=flow.constant_initializer(0), gamma_initializer=flow.random_normal_initializer(mean=1, stddev=0.02), trainable=trainable, name='bnout' ) fc6 = flow.layers.dense( inputs=bn1, units=num_class, activation=None, use_bias=False, kernel_initializer=flow.random_normal_initializer(mean=0, stddev=0.01), trainable=trainable, name="fc6", ) return feature, fc6 def HS_reid_train(images, num_class=751, trainable=False): """Slice feature map into two parts horizontally by GAP in order to mining discriminative features""" with flow.scope.namespace("base"): stem = layer0(images, trainable=trainable) body = resnet_conv_x_body(stem, lambda x: x, trainable=trainable) with flow.scope.namespace("gap"): pool5 = flow.nn.avg_pool2d(body, ksize=[4, 8], strides=4, padding="VALID", data_format="NCHW", name="pool5") feature = flow.reshape(pool5, [pool5.shape[0], -1]) if not trainable: return feature bn1 = flow.layers.batch_normalization( feature, axis=1, center=False, beta_initializer=flow.constant_initializer(0), gamma_initializer=flow.random_normal_initializer(mean=1, stddev=0.02), trainable=trainable, name='bnout' ) fc6 = flow.layers.dense( inputs=bn1, units=num_class, activation=None, use_bias=False, kernel_initializer=flow.random_normal_initializer(mean=0, stddev=0.01), trainable=trainable, name="fc6", ) return feature, fc6
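For orientation, here is a minimal inference sketch around resreid_train. It assumes the legacy OneFlow global-function API (the same style as the prelu example later in this dump) and a hypothetical 256x128 input resolution, which is consistent with the [16, 8] pooling kernel in resreid_train; the job name and batch size are illustrative.

import oneflow.typing as tp

@flow.global_function(type="predict")
def reid_eval_job(
    images: tp.Numpy.Placeholder((16, 3, 256, 128), dtype=flow.float),
) -> tp.Numpy:
    # with trainable=False, resreid_train returns only the pooled feature vector
    return resreid_train(images, trainable=False)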
[ "oneflow.nn.relu", "oneflow.nn.avg_pool2d", "oneflow.constant_initializer", "oneflow.variance_scaling_initializer", "oneflow.nn.max_pool2d", "oneflow.reshape", "oneflow.layers.batch_normalization", "oneflow.random_normal_initializer", "oneflow.nn.conv2d", "oneflow.scope.namespace", "oneflow.get_variable" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import random import cv2 import numpy as np import PIL import oneflow as flow global_coco_dict = dict() default_coco_anno_file = "/dataset/mscoco_2017/annotations/instances_val2017.json" default_coco_image_dir = "/dataset/mscoco_2017/val2017" def get_coco(anno_file): global global_coco_dict if anno_file not in global_coco_dict: from pycocotools.coco import COCO global_coco_dict[anno_file] = COCO(anno_file) return global_coco_dict[anno_file] def random_sample_images_from_coco( anno_file=default_coco_anno_file, image_dir=default_coco_image_dir, batch_size=2 ): image_files = [] image_ids = [] batch_group_id = -1 coco = get_coco(anno_file) img_ids = coco.getImgIds() while len(image_files) < batch_size: rand_img_id = random.choice(img_ids) img_h = coco.imgs[rand_img_id]["height"] img_w = coco.imgs[rand_img_id]["width"] group_id = int(img_h / img_w) if batch_group_id == -1: batch_group_id = group_id if group_id != batch_group_id: continue image_files.append(os.path.join(image_dir, coco.imgs[rand_img_id]["file_name"])) image_ids.append(rand_img_id) assert len(image_files) == len(image_ids) return (image_files, image_ids) def read_images_by_cv(image_files, dtype, channels=3): np_dtype = flow.convert_oneflow_dtype_to_numpy_dtype(dtype) images = [cv2.imread(image_file).astype(np_dtype) for image_file in image_files] assert all((isinstance(image, np.ndarray) for image in images)) assert all((image.ndim == 3 for image in images)) assert all((image.shape[2] == channels for image in images)) return images def read_images_by_pil(image_files, dtype, channels=3): image_objs = [PIL.Image.open(image_file) for image_file in image_files] images = [] np_dtype = flow.convert_oneflow_dtype_to_numpy_dtype(dtype) for im in image_objs: bands = im.getbands() band = "".join(bands) if band == "RGB": images.append(np.asarray(im).astype(np_dtype)[:, :, ::-1]) elif band == "L": gs_image = np.asarray(im).astype(np_dtype) gs_image_shape = gs_image.shape assert len(gs_image_shape) == 2 gs_image = gs_image.reshape(gs_image_shape + (1,)) gs_image = np.broadcast_to(gs_image, shape=gs_image_shape + (3,)) images.append(gs_image) elif band == "BGR": images.append(np.asarray(im).astype(np_dtype)) else: raise NotImplementedError assert all((isinstance(image, np.ndarray) for image in images)) assert all((image.ndim == 3 for image in images)) assert all((image.shape[2] == channels for image in images)) return images def infer_images_static_shape(images, channels=3): image_shapes = [image.shape for image in images] assert all((image.ndim == 3 for image in images)) assert all((image.shape[2] == channels for image in images)) image_shapes = np.asarray(image_shapes) max_h = np.max(image_shapes[:, 0]).item() max_w = np.max(image_shapes[:, 1]).item() image_static_shape = (len(images), max_h, max_w, channels) group_ids = [] aspect_ratio_list = [] for image_shape in image_shapes: (h, w) = image_shape[0:2] if h < w: group_id = 0 aspect_ratio = h / w else: 
group_id = 1 aspect_ratio = w / h group_ids.append(group_id) aspect_ratio_list.append(aspect_ratio) assert all((group_id == group_ids[0] for group_id in group_ids)) return (image_static_shape, aspect_ratio_list) def compute_keep_aspect_ratio_resized_size( target_size, min_size, max_size, aspect_ratio, resize_side ): if resize_side == "shorter": min_res_size = target_size max_res_size = int(round(min_res_size / aspect_ratio)) if max_size is not None and max_res_size > max_size: max_res_size = max_size min_res_size = int(round(max_res_size * aspect_ratio)) elif resize_side == "longer": max_res_size = target_size min_res_size = int(round(max_res_size * aspect_ratio)) if min_size is not None and min_res_size < min_size: min_res_size = min_size max_res_size = int(round(min_res_size / aspect_ratio)) else: raise NotImplementedError return (min_res_size, max_res_size) def infer_keep_aspect_ratio_resized_images_static_shape( target_size, min_size, max_size, aspect_ratio_list, resize_side="shorter", channels=3, ): resized_size_list = [] for aspect_ratio in aspect_ratio_list: resized_size_list.append( compute_keep_aspect_ratio_resized_size( target_size, min_size, max_size, aspect_ratio, resize_side ) ) (res_min_size, res_max_size) = max( resized_size_list, key=lambda size: size[0] * size[1] ) return (res_min_size, res_max_size, channels)
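A small end-to-end sketch of these helpers follows. It assumes the hard-coded default COCO annotation file and image directory above actually exist on the machine, so treat it as illustrative only; the target/min/max sizes are typical detection values, not values taken from this file.

import oneflow as flow

image_files, image_ids = random_sample_images_from_coco(batch_size=2)
images = read_images_by_cv(image_files, flow.float32)
static_shape, aspect_ratios = infer_images_static_shape(images)
print(static_shape)  # (2, max_h, max_w, 3)
print(infer_keep_aspect_ratio_resized_images_static_shape(
    target_size=800, min_size=None, max_size=1333, aspect_ratio_list=aspect_ratios
))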
[ "oneflow.convert_oneflow_dtype_to_numpy_dtype" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow from oneflow.framework.docstr.utils import add_docstr add_docstr( oneflow.amax, """ oneflow.amax(input, dim=None, keepdim=False) -> Tensor This function is equivalent to PyTorch’s amax function. It returns the maximum along a dimension. Args: input (oneflow.Tensor): the input Tensor. dim (int or List of int, optional): the dimension or the dimensions to reduce. Dim is None by default. keepdim (bool, optional): whether to retain the dimension. keepdim is False by default. Returns: oneflow.Tensor: Maximum of the input tensor For example: .. code-block:: python >>> import oneflow as flow >>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) >>> flow.amax(x, 1) tensor([[2, 3], [6, 7]], dtype=oneflow.int64) >>> flow.amax(x, 0) tensor([[4, 5], [6, 7]], dtype=oneflow.int64) >>> flow.amax(x) tensor(7, dtype=oneflow.int64) >>> flow.amax(x, 0, True) tensor([[[4, 5], [6, 7]]], dtype=oneflow.int64) """, )
[ "oneflow.framework.docstr.utils.add_docstr" ]
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import oneflow as flow

from .build import SCHEDULER_REGISTRY

logger = logging.getLogger(__name__)


@SCHEDULER_REGISTRY.register()
def WarmupCosineLR(
    optimizer: flow.optim.Optimizer,
    max_iter: int,
    warmup_factor: float,
    warmup_iter: int,
    alpha: float = 0.0,
    warmup_method: str = "linear",
):
    """Create a schedule with a learning rate that decreases following the values of the
    Cosine function between the initial lr set in the optimizer to 0, after a warmup period
    during which it increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer (flow.optim.Optimizer): Wrapped optimizer.
        max_iter (int): Total training iters.
        warmup_factor (float): The warmup factor.
        warmup_iter (int): The number of warmup steps.
        alpha (float, optional): The learning rate scale factor (:math:`\\alpha`). Defaults to 0.0.
        warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant".
            In linear mode, the multiplication factor starts with warmup_factor in the first
            epoch and then increases linearly to reach 1. Defaults to "linear".
    """
    cosine_decay_lr = flow.optim.lr_scheduler.CosineDecayLR(
        optimizer, decay_steps=max_iter, alpha=alpha
    )
    if warmup_iter == 0:
        logger.warning("warmup iters equals to zero, return CosineLR")
        return cosine_decay_lr
    elif warmup_iter > max_iter:
        logger.warning("warmup iters is larger than the total training iters")
    warmup_cosine_lr = flow.optim.lr_scheduler.WarmUpLR(
        cosine_decay_lr,
        warmup_factor=warmup_factor,
        warmup_iters=warmup_iter,
        warmup_method=warmup_method,
    )
    return warmup_cosine_lr


@SCHEDULER_REGISTRY.register()
def WarmupCosineAnnealingLR(
    optimizer: flow.optim.Optimizer,
    max_iter: int,
    warmup_factor: float,
    warmup_iter: int,
    eta_min: float = 0.0,
    warmup_method: str = "linear",
):
    """Create a schedule with a learning rate that decreases following the values of the
    Cosine Annealing function between the initial lr set in the optimizer to 0, after a warmup
    period during which it increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer (flow.optim.Optimizer): Wrapped optimizer.
        max_iter (int): Total training iters.
        warmup_factor (float): The warmup factor.
        warmup_iter (int): The number of warmup steps.
        eta_min (float, optional): Minimum learning rate. Defaults to 0.0.
        warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant".
            In linear mode, the multiplication factor starts with warmup_factor in the first
            epoch and then increases linearly to reach 1. Defaults to "linear".
    """
    cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=max_iter, eta_min=eta_min
    )
    if warmup_iter == 0:
        logger.warning("warmup iters equals to zero, return CosineAnnealingLR")
        return cosine_annealing_lr
    warmup_cosine_annealing_lr = flow.optim.lr_scheduler.WarmUpLR(
        cosine_annealing_lr,
        warmup_factor=warmup_factor,
        warmup_iters=warmup_iter,
        warmup_method=warmup_method,
    )
    return warmup_cosine_annealing_lr


@SCHEDULER_REGISTRY.register()
def WarmupMultiStepLR(
    optimizer: flow.optim.Optimizer,
    max_iter: int,
    warmup_factor: float,
    warmup_iter: int,
    milestones: list,
    gamma: float = 0.1,
    warmup_method: str = "linear",
):
    """Create a schedule with a learning rate that decreases following the values of the
    MultiStep function between the initial lr set in the optimizer to 0, after a warmup period
    during which it increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer (flow.optim.Optimizer): Wrapped optimizer.
        max_iter (int): Total training iters.
        warmup_factor (float): The warmup factor.
        warmup_iter (int): The number of warmup steps.
        milestones (list): List of step indices. Must be increasing.
        gamma (float, optional): Multiplicative factor of learning rate decay. Defaults to 0.1.
        warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant".
            In linear mode, the multiplication factor starts with warmup_factor in the first
            epoch and then increases linearly to reach 1. Defaults to "linear".
    """
    multistep_lr = flow.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=gamma
    )
    if warmup_iter == 0:
        logger.warning("warmup iters equals to zero, return MultiStepLR")
        return multistep_lr
    warmup_multistep_lr = flow.optim.lr_scheduler.WarmUpLR(
        multistep_lr,
        warmup_factor=warmup_factor,
        warmup_iters=warmup_iter,
        warmup_method=warmup_method,
    )
    return warmup_multistep_lr


@SCHEDULER_REGISTRY.register()
def WarmupExponentialLR(
    optimizer: flow.optim.Optimizer,
    max_iter: int,
    gamma: float,
    warmup_factor: float,
    warmup_iter: int,
    warmup_method: str = "linear",
):
    """Create a schedule with a learning rate that decreases following the values of the
    Exponential function between the initial lr set in the optimizer to 0, after a warmup period
    during which it increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer (flow.optim.Optimizer): Wrapped optimizer.
        max_iter (int): Total training iters.
        gamma (float): Multiplicative factor of learning rate decay.
        warmup_factor (float): The warmup factor.
        warmup_iter (int): The number of warmup steps.
        warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant".
            In linear mode, the multiplication factor starts with warmup_factor in the first
            epoch and then increases linearly to reach 1. Defaults to "linear".
    """
    exponential_lr = flow.optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)
    if warmup_iter == 0:
        logger.warning("warmup iters equals to zero, return ExponentialLR")
        return exponential_lr
    warmup_exponential_lr = flow.optim.lr_scheduler.WarmUpLR(
        exponential_lr,
        warmup_factor=warmup_factor,
        warmup_iters=warmup_iter,
        warmup_method=warmup_method,
    )
    return warmup_exponential_lr


@SCHEDULER_REGISTRY.register()
def WarmupPolynomialLR(
    optimizer: flow.optim.Optimizer,
    max_iter: int,
    warmup_factor: float,
    warmup_iter: int,
    end_learning_rate: float = 0.0001,
    power: float = 1.0,
    cycle: bool = False,
    warmup_method: str = "linear",
):
    """Create a schedule with a learning rate that decreases as a polynomial decay from the
    initial lr set in the optimizer to end lr defined by `lr_end`, after a warmup period during
    which it increases linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer (flow.optim.Optimizer): Wrapped optimizer.
        max_iter (int): Total training iters.
        warmup_factor (float): The warmup factor.
        warmup_iter (int): The number of warmup steps.
        end_learning_rate (float, optional): The final learning rate. Defaults to 0.0001.
        power (float, optional): The power of polynomial. Defaults to 1.0.
        cycle (bool, optional): If cycle is True, the scheduler will decay the learning rate
            every decay steps. Defaults to False.
        warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant".
            In linear mode, the multiplication factor starts with warmup_factor in the first
            epoch and then increases linearly to reach 1. Defaults to "linear".
    """
    polynomial_lr = flow.optim.lr_scheduler.PolynomialLR(
        optimizer, steps=max_iter, end_learning_rate=end_learning_rate, power=power, cycle=cycle
    )
    if warmup_iter == 0:
        logger.warning("warmup iters equals to zero, return PolynomialLR")
        return polynomial_lr
    warmup_polynomial_lr = flow.optim.lr_scheduler.WarmUpLR(
        polynomial_lr,
        warmup_factor=warmup_factor,
        warmup_iters=warmup_iter,
        warmup_method=warmup_method,
    )
    return warmup_polynomial_lr
[ "oneflow.optim.lr_scheduler.PolynomialLR", "oneflow.optim.lr_scheduler.ExponentialLR", "oneflow.optim.lr_scheduler.MultiStepLR", "oneflow.optim.lr_scheduler.WarmUpLR", "oneflow.optim.lr_scheduler.CosineDecayLR", "oneflow.optim.lr_scheduler.CosineAnnealingLR" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from typing import Optional, Sequence import oneflow as flow import oneflow._oneflow_internal import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util import oneflow.core.job.regularizer_conf_pb2 as regularizer_conf_util import oneflow.core.operator.op_conf_pb2 as op_conf_util import oneflow.framework.distribute as distribute_util import oneflow.framework.remote_blob as remote_blob_util def prelu( inputs: oneflow._oneflow_internal.BlobDesc, alpha_initializer: Optional[initializer_conf_util.InitializerConf] = None, alpha_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None, shared_axes: Optional[Sequence[int]] = None, trainable: bool = True, name: str = "PRelu", model_distribute: oneflow._oneflow_internal.distribute.Distribute = oneflow._oneflow_internal.distribute.broadcast(), ) -> oneflow._oneflow_internal.BlobDesc: """The Prelu(Parametric Rectified Linear Unit) activation. The :math:`\\alpha` is a parameter that can be trained in network The equation is .. math:: out = max(0, x) + \\alpha*min(0, x) Args: inputs (oneflow._oneflow_internal.BlobDesc): The input Blob. alpha_initializer (Optional[initializer_conf_util.InitializerConf], optional): The initializer of alpha. Defaults to None. alpha_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): The regularizer of alpha. Defaults to None. shared_axes (Optional[Sequence[int]], optional): The axis along which to share learnable parameters for the prelu activation function. Defaults to None. trainable (bool, optional): Whether to train the parameter :math:`\\alpha`. Defaults to True. name (str, optional): The name for the operation. Defaults to "PRelu". model_distribute (oneflow._oneflow_internal.distribute.Distribute, optional): Define the way to ditribute the model. Defaults to oneflow._oneflow_internal.distribute.broadcast(). Returns: oneflow._oneflow_internal.BlobDesc: The activated Blob For example: .. 
code-block:: python import oneflow as flow import oneflow.typing as tp BATCH_SIZE = 100 def lenet(data, train=False): initializer = flow.truncated_normal(0.1) conv1 = flow.layers.conv2d( data, 32, 5, padding="SAME", name="conv1", kernel_initializer=initializer, ) prelu1 = flow.layers.prelu(conv1, alpha_initializer=initializer, shared_axes=[2, 3], name="Prelu1") pool1 = flow.nn.max_pool2d( prelu1, ksize=2, strides=2, padding="SAME", name="pool1", data_format="NCHW" ) conv2 = flow.layers.conv2d( pool1, 64, 5, padding="SAME", name="conv2", kernel_initializer=initializer, ) prelu2 = flow.layers.prelu(conv2, alpha_initializer=initializer, shared_axes=[2, 3], name="Prelu2") pool2 = flow.nn.max_pool2d( prelu2, ksize=2, strides=2, padding="SAME", name="pool2", data_format="NCHW" ) reshape = flow.reshape(pool2, [pool2.shape[0], -1]) hidden = flow.layers.dense( reshape, 512, activation=flow.nn.relu, kernel_initializer=initializer, name="dense1", ) if train: hidden = flow.nn.dropout(hidden, rate=0.5, name="dropout") return flow.layers.dense(hidden, 10, kernel_initializer=initializer, name="dense2") @flow.global_function(type="train") def train_job( images: tp.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float), labels: tp.Numpy.Placeholder((BATCH_SIZE,), dtype=flow.int32), ) -> tp.Numpy: with flow.scope.placement("gpu", "0:0"): logits = lenet(images, train=True) loss = flow.nn.sparse_softmax_cross_entropy_with_logits( labels, logits, name="softmax_loss" ) lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1]) flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(loss) return loss """ alpha_shape = list(inputs.shape[1:]) if shared_axes is not None: for i in shared_axes: assert i >= 1 and i < len(inputs.shape) alpha_shape[i - 1] = 1 if alpha_initializer is None: alpha_initializer = flow.constant_initializer(0) with flow.scope.namespace(name): alpha = flow.get_variable( name="alpha", shape=alpha_shape, dtype=inputs.dtype, initializer=alpha_initializer, regularizer=alpha_regularizer, trainable=trainable, distribute=model_distribute, reuse=False, ) op = ( flow.user_op_builder(name) .Op("prelu") .Input("x", [inputs]) .Input("alpha", [alpha]) .Output("y") .Build() ) return op.InferAndTryRun().SoleOutputBlob()
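To make the shared_axes bookkeeping above concrete, here is the alpha-shape computation traced by hand on a hypothetical NCHW input:

# inputs.shape == (8, 32, 28, 28)          # hypothetical NCHW batch
# alpha_shape  == [32, 28, 28]             # list(inputs.shape[1:])
# shared_axes  == [2, 3]  ->  alpha_shape == [32, 1, 1]
# i.e. one learnable slope per channel, shared across the H and W axes.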
[ "oneflow.user_op_builder", "oneflow.scope.namespace", "oneflow.constant_initializer", "oneflow.get_variable" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow from oneflow.framework.docstr.utils import add_docstr add_docstr( oneflow.nms, """ Performs non-maximum suppression (NMS) on the boxes according to their intersection-over-union (IoU). NMS iteratively removes lower scoring boxes which have an IoU greater than iou_threshold with another (higher scoring) box. Args: boxes (Tensor[N, 4]): boxes to perform NMS on. They are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and ``0 <= y1 < y2``. scores (Tensor[N]): scores for each one of the boxes iou_threshold (float): discards all overlapping boxes with IoU > iou_threshold Returns: Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted in decreasing order of scores """, )
[ "oneflow.framework.docstr.utils.add_docstr" ]
""" Modified from https://github.com/tmp-iclr/convmixer/blob/main/convmixer.py """ import oneflow as flow import oneflow.nn as nn from .registry import ModelCreator from .utils import load_state_dict_from_url __all__ = [ "ConvMixer", "convmixer_1536_20", "convmixer_768_32_relu", "convmixer_1024_20", ] model_urls = { "convmixer_768_32_relu": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvMixer/convmixer_768_32_ks7_p7_relu.zip", "convmixer_1024_20": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvMixer/convmixer_1024_20_ks9_p14.zip", "convmixer_1536_20": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvMixer/convmixer_1536_20_ks9_p7.zip", } class ResidualAdd(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x def ConvMixer( dim, depth, kernel_size=9, patch_size=7, n_classes=1000, activation=nn.GELU ): return nn.Sequential( nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size), activation(), nn.BatchNorm2d(dim), *[ nn.Sequential( ResidualAdd( nn.Sequential( nn.Conv2d( dim, dim, kernel_size, groups=dim, padding=kernel_size // 2 ), activation(), nn.BatchNorm2d(dim), ) ), nn.Conv2d(dim, dim, kernel_size=1), activation(), nn.BatchNorm2d(dim), ) for i in range(depth) ], nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(dim, n_classes) ) @ModelCreator.register_model def convmixer_1536_20(pretrained: bool = False, progress: bool = True, **kwargs): """ Constructs the ConvMixer model with 20 depth and 1536 hidden size. .. note:: ConvMixer model with 20 depth and 1536 hidden size from the `Patched Are All You Need? <https://openreview.net/pdf?id=TVHS5Y4dNvM>`_ paper. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> convmixer_1536_20 = flowvision.models.convmixer_1536_20(pretrained=False, progress=True) """ model = ConvMixer(1536, 20, kernel_size=9, patch_size=7, n_classes=1000) if pretrained: state_dict = load_state_dict_from_url( model_urls["convmixer_1536_20"], model_dir="./checkpoints", progress=progress, ) model.load_state_dict(state_dict) return model @ModelCreator.register_model def convmixer_768_32_relu(pretrained: bool = False, progress: bool = True, **kwargs): """ Constructs the ConvMixer model with 32 depth and 768 hidden size and ReLU activation layer. .. note:: ConvMixer model with 32 depth and 768 hidden size and ReLU activation layer from the `Patched Are All You Need? <https://openreview.net/pdf?id=TVHS5Y4dNvM>`_ paper. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. 
code-block:: python >>> import flowvision >>> convmixer_768_32_relu = flowvision.models.convmixer_768_32_relu(pretrained=False, progress=True) """ model = ConvMixer( 768, 32, kernel_size=7, patch_size=7, n_classes=1000, activation=nn.ReLU ) if pretrained: state_dict = load_state_dict_from_url( model_urls["convmixer_768_32_relu"], model_dir="./checkpoints", progress=progress, ) model.load_state_dict(state_dict) return model @ModelCreator.register_model def convmixer_1024_20(pretrained: bool = False, progress: bool = True, **kwargs): """ Constructs the ConvMixer model with 20 depth and 1024 hidden size. .. note:: ConvMixer model with 20 depth and 1024 hidden size from the `Patched Are All You Need? <https://openreview.net/pdf?id=TVHS5Y4dNvM>`_ paper. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> convmixer_1024_20 = flowvision.models.convmixer_1024_20(pretrained=False, progress=True) """ model = ConvMixer(1024, 20, kernel_size=9, patch_size=14, n_classes=1000) if pretrained: state_dict = load_state_dict_from_url( model_urls["convmixer_1024_20"], model_dir="./checkpoints", progress=progress, ) model.load_state_dict(state_dict) return model
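A quick shape check for one of these creators; pretrained=False avoids the checkpoint download, and 224x224 is the usual ImageNet resolution, assumed here rather than stated in this file:

import oneflow as flow

model = convmixer_1024_20(pretrained=False)
x = flow.randn(1, 3, 224, 224)
y = model(x)
print(y.shape)  # expected: flow.Size([1, 1000])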
[ "oneflow.nn.Linear", "oneflow.nn.Conv2d", "oneflow.nn.BatchNorm2d", "oneflow.nn.AdaptiveAvgPool2d", "oneflow.nn.Flatten" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from oneflow.test_utils.automated_test_util import * import oneflow as flow import oneflow.unittest @flow.unittest.skip_unless_1n1d() class TestCosineSimilarity(flow.unittest.TestCase): @autotest(n=3) def test_cosine_similartiy_module_with_random_data(test_case): device = random_device() a = random_tensor(ndim=2, dim0=10, dim1=128).to(device) b = random_tensor(ndim=2, dim0=10, dim1=128).to(device) cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6).to(device) cos.train(random()) output = cos(a, b) return output @autotest(n=3) def test_cosine_similartiy_functional_with_random_data(test_case): device = random_device() a = random_tensor(ndim=2, dim0=10, dim1=128).to(device) b = random_tensor(ndim=2, dim0=10, dim1=128).to(device) output = torch.nn.functional.cosine_similarity(a, b, dim=1, eps=1e-6) return output @autotest(n=3) def test_cosine_similartiy_broadcast_with_random_data(test_case): device = random_device() a = random_tensor(ndim=2, dim0=10, dim1=128).to(device) b = random_tensor(ndim=2, dim0=1, dim1=128).to(device) output = torch.nn.functional.cosine_similarity(a, b, dim=1, eps=1e-6) return output if __name__ == "__main__": unittest.main()
[ "oneflow.unittest.skip_unless_1n1d" ]
""" Copyright 2020 Tianshu AI Platform. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import numpy as np import pandas as pd import oneflow as flow import util.ofrecord_util as ofrecord_util from util.model_weights import modelWeight import json def InitNodes(args): if args.num_nodes > 1: assert args.num_nodes <= len(args.node_ips) flow.env.ctrl_port(12138) nodes = [] for ip in args.node_ips: addr_dict = {} addr_dict["addr"] = ip nodes.append(addr_dict) flow.env.machine(nodes) # laod cfg (model structure) def LoadCfg(args, model_load_dir, load_type): if model_load_dir: if args.model == "resnet": assert os.path.isdir(model_load_dir) of_weight_path = model_load_dir.rsplit("/",1)[0] + "/weights_profile_path" cfg_temp = [] cfg = [] weights_dict = modelWeight.load(of_weight_path) for name, profile_dict in weights_dict.items(): if name.endswith("weight") and "stem" not in name and "shortcut" not in name: shape=profile_dict["shape"] cfg_temp.append(shape[0]) cfg.append(cfg_temp[0:9]) cfg.append(cfg_temp[9:21]) cfg.append(cfg_temp[21:39]) cfg.append(cfg_temp[39:48]) cfg.append(cfg_temp[48]) if load_type == 'train': modelWeight.weights_dict = {} else: assert os.path.isdir(model_load_dir) of_weight_path = model_load_dir.rsplit("/",1)[0] + "/weights_profile_path" cfg = [] weights_dict = modelWeight.load(of_weight_path) for name, profile_dict in weights_dict.items(): if name.endswith("weight"): shape=profile_dict["shape"] cfg.append(shape[0]) # print(load_type, modelWeight.weights_dict) if load_type == 'train': modelWeight.weights_dict = {} else: if args.model == 'vgg': # cfg = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 4096, 4096, args.num_classes] cfg = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512, 128, args.num_classes] elif args.model == 'alexnet': cfg = [96, 256, 384, 384, 256, 4096, 4096, args.num_classes] elif args.model == 'alexnet_simple': cfg = [24, 96, 192, 192, 96, 1024, 1024, args.num_classes] elif args.model == 'lenet': cfg = [6, 16, 120, 84, args.num_classes] elif args.model == "resnet": cfg = [[64, 64, 256, 64, 64, 256, 64, 64, 256], [128, 128, 512, 128, 128, 512, 128, 128, 512, 128, 128, 512], [256, 256, 1024, 256, 256, 1024, 256, 256, 1024, 256, 256, 1024, 256, 256, 1024, 256, 256, 1024], [512, 512, 2048, 512, 512, 2048, 512, 512, 2048], args.num_classes] elif args.model == 'dnn_2': cfg = [128, args.num_classes] elif args.model == 'dnn_4': cfg = [4096, 256, 128, args.num_classes] else: cfg = [] if load_type == 'train': print('Model structure:', cfg) return cfg # laod cfg(model structure) def LoadData(args, load_type): # total_device_num = args.num_nodes * args.gpu_num_per_node # train_batch_size = total_device_num * args.batch_size_per_device # val_batch_size = total_device_num * args.val_batch_size_per_device if load_type == 'train': if args.train_data_dir: assert os.path.exists(args.train_data_dir) print("Loading data from 
{}".format(args.train_data_dir)) if args.data_type == 'imageNet': (labels, images) = ofrecord_util.load_imagenet_for_training(args) elif args.data_type == 'cifar10' or args.data_type == 'cifar100': (labels, images) = ofrecord_util.load_cifar_for_training(args) elif args.data_type == 'mnist' or args.data_type == 'mnist_32': (labels, images) = ofrecord_util.load_mnist_for_training(args) elif args.data_type == 'svhn': (labels, images) = ofrecord_util.load_svhn_for_training(args) elif args.data_type == 'random': (labels, images) = ofrecord_util.load_synthetic(args) else: (labels, images) = ofrecord_util.load_mydata_for_training(args) else: print("Loading synthetic data.") (labels, images) = ofrecord_util.load_synthetic(args) elif load_type == 'test': if args.val_data_dir: assert os.path.exists(args.val_data_dir) print("Loading data from {}".format(args.val_data_dir)) if args.data_type == 'imageNet': (labels, images) = ofrecord_util.load_imagenet_for_validation(args) elif args.data_type == 'cifar10' or args.data_type == 'cifar100': (labels, images) = ofrecord_util.load_cifar_for_training(args) elif args.data_type == 'mnist' or args.data_type == "mnist_32": (labels, images) = ofrecord_util.load_mnist_for_validation(args) elif args.data_type == 'svhn': (labels, images) = ofrecord_util.load_svhn_for_validation(args) elif args.data_type == 'random': (labels, images) = ofrecord_util.load_synthetic(args) else: (labels, images) = ofrecord_util.load_mydata_for_training(args) else: print("Loading synthetic data.") (labels, images) = ofrecord_util.load_synthetic(args) else: print("Loading synthetic data.") (labels, images) = ofrecord_util.load_synthetic(args) return labels, images #get save path and load path of model #def getSaveLoadDir(args): # if args.default_dir == 'train': # model_save_dir = './output/snapshots/model_base' # if args.data_type == 'imageNet': # if args.model == 'vgg': # model_load_dir = './model_init/vgg/model_init_imageNet/of_init_model' # elif args.model == 'alexnet': # model_load_dir = './model_init/alexnet/model_init_imageNet/of_init_model' # elif args.model == 'lenet': # model_load_dir = './model_init/lenet/model_init_imageNet/of_init_model' # elif args.data_type == 'cifar10': # if args.model == 'vgg': # model_load_dir = './model_init/vgg/model_init_cifar10/of_init_model' # elif args.model == 'alexnet': # model_load_dir = './model_init/alexnet/model_init_cifar10/of_init_model' # elif args.model == 'lenet': # model_load_dir = './model_init/lenet/model_init_cifar10/of_init_model' # elif args.default_dir == 'refine': # model_save_dir = './output/snapshots/model_refine' # model_load_dir = './output/snapshots/model_prune/model' # else: # model_save_dir = args.model_save_dir # model_load_dir = args.model_load_dir # return model_save_dir, model_load_dir class Snapshot(object): def __init__(self, model_save_dir, model_load_dir): self._model_save_dir = model_save_dir self._check_point = flow.train.CheckPoint() if model_load_dir: assert os.path.isdir(model_load_dir) print("Restoring model from {}.".format(model_load_dir)) self._check_point.load(model_load_dir) else: self._check_point.init() self.save('initial_model') print("Init model on demand.") def save(self, name): snapshot_save_path = os.path.join(self._model_save_dir, "snapshot_{}".format(name)) if not os.path.exists(snapshot_save_path): os.makedirs(snapshot_save_path) print("Saving model to {}.".format(snapshot_save_path)) self._check_point.save(snapshot_save_path) class Summary(object): def __init__(self, log_dir, config, 
filename='summary.csv'): self._filename = filename self._log_dir = log_dir if not os.path.exists(log_dir): os.makedirs(log_dir) self._metrics = pd.DataFrame({"epoch":0, "iter": 0, "legend": "cfg", "note": str(config)}, index=[0]) def scalar(self, legend, value, epoch, step=-1): # TODO: support rank(which device/gpu) df = pd.DataFrame( {"epoch": epoch, "iter": step, "legend": legend, "value": value, "rank": 0}, index=[0]) self._metrics = pd.concat([self._metrics, df], axis=0, sort=False) def save(self): save_path = os.path.join(self._log_dir, self._filename) self._metrics.to_csv(save_path, index=False) class StopWatch(object): def __init__(self): pass def start(self): self.start_time = time.time() self.last_split = self.start_time def split(self): now = time.time() duration = now - self.last_split self.last_split = now return duration def stop(self): self.stop_time = time.time() def duration(self): return self.stop_time - self.start_time def match_top_k(predictions, labels, top_k=1): max_k_preds = np.argpartition(predictions.numpy(), -top_k)[:, -top_k:] match_array = np.logical_or.reduce(max_k_preds==labels.reshape((-1, 1)), axis=1) num_matched = match_array.sum() return num_matched, match_array.shape[0] class Metric(object): def __init__(self, summary=None, save_summary_steps=-1, desc='train', calculate_batches=-1, batch_size=256, top_k=6, prediction_key='predictions', label_key='labels', loss_key=None): self.summary = summary self.save_summary = isinstance(self.summary, Summary) self.save_summary_steps = save_summary_steps self.desc = desc self.calculate_batches = calculate_batches self.top_k = top_k self.prediction_key = prediction_key self.label_key = label_key self.loss_key = loss_key self.teacher_model_size = 0 self.student_model_size = 0 if loss_key: self.fmt = "{}: epoch {}, iter {}, loss: {:.6f}, accuracy(top1): {:.6f}, accuracy(topk): {:.6f}, samples/s: {:.3f}" else: self.fmt = "{}: epoch {}, iter {}, accuracy(top1): {:.6f}, accuracy(topk): {:.6f}, samples/s: {:.3f}" self.timer = StopWatch() self.timer.start() self._clear() def _clear(self): self.top_1_num_matched = 0 self.top_k_num_matched = 0 self.num_samples = 0.0 def metric_cb(self, epoch, step, args=None, log_file=None): def callback(outputs): if step == 0: self._clear() if self.prediction_key: num_matched, num_samples = match_top_k(outputs[self.prediction_key], outputs[self.label_key]) self.top_1_num_matched += num_matched num_matched, _ = match_top_k(outputs[self.prediction_key], outputs[self.label_key], self.top_k) self.top_k_num_matched += num_matched else: num_samples = outputs[self.label_key].shape[0] self.num_samples += num_samples if (step + 1) % self.calculate_batches == 0: throughput = self.num_samples / self.timer.split() if self.prediction_key: top_1_accuracy = self.top_1_num_matched / self.num_samples top_k_accuracy = self.top_k_num_matched / self.num_samples else: top_1_accuracy = 0.0 top_k_accuracy = 0.0 if self.loss_key: loss = outputs[self.loss_key].mean() print(self.fmt.format(self.desc, epoch, step + 1, loss, top_1_accuracy, top_k_accuracy, throughput)) # print(outputs[self.prediction_key].numpy(), # outputs[self.label_key].numpy(), # outputs['logits'].numpy()) if self.save_summary: self.summary.scalar(self.desc+"_" + self.loss_key, loss, epoch, step) else: print('*'*106) print(self.fmt.format(self.desc, epoch, step + 1, top_1_accuracy, top_k_accuracy, throughput)) if self.desc=='validation': def getdirsize(dir): size = 0 for root, dirs, files in os.walk(dir): for name in files: if str(root[-2:]) == '-v' or 
str(root[-2:]) == '-m': pass else: tmp = os.path.getsize(os.path.join(root, name)) size += tmp # size += sum([os.path.getsize(os.path.join(root, name)) for name in files]) return size model_size = 0 if args.log_type == 'base_model': if os.path.exists(os.path.join(args.model_save_dir,'snapshot_initial_model')): self.teacher_model_size = getdirsize(os.path.join(args.model_save_dir,'snapshot_initial_model')) elif os.path.exists(os.path.join(args.model_save_dir,'snapshot_last')): self.teacher_model_size = getdirsize(os.path.join(args.model_save_dir,'snapshot_last')) elif os.path.exists(os.path.join(args.model_save_dir,'snapshot_epoch_0')): self.teacher_model_size = getdirsize(os.path.join(args.model_save_dir,'snapshot_epoch_0')) else: print('Error: cannot find {}'.format(args.model_save_dir)) model_size = self.teacher_model_size # teacher model size, i.e. the size of the model_base/snapshot_initial_model directory elif args.log_type == 'prune_model': if os.path.exists(args.model_load_dir): self.student_model_size = getdirsize(args.model_load_dir) else: print('Error: cannot find {}'.format(args.model_load_dir)) model_size = self.student_model_size # student model size, i.e. the size of the model_prune/model directory save_dict = {"accuracy": "%.2f" % top_1_accuracy, "top_k_accuracy": "%.2f" % top_k_accuracy, "top_k": "%d" % self.top_k, "modelSize": "%d" % (model_size / 1024 / 1024), "reasoningTime": "%.2f" % throughput } # samples/second if args.log_type == 'base_model': if not os.path.exists(args.before_result_dir): os.makedirs(args.before_result_dir) with open(os.path.join(args.before_result_dir, "results_eval.json"), "w") as f: json.dump(save_dict, f) if args.log_type == 'prune_model': if not os.path.exists(args.after_result_dir): os.makedirs(args.after_result_dir) with open(os.path.join(args.after_result_dir, "results_eval.json"), "w") as f: json.dump(save_dict, f) if log_file: log_file.write("epoch"+str(epoch)+" top_1_accuracy: "+str(top_1_accuracy)+\ "; top_k_accuracy: "+str(top_k_accuracy)+"; "+str(throughput)+"samples/s\n") print('*'*106) self._clear() if self.save_summary: self.summary.scalar(self.desc + "_throughput", throughput, epoch, step) if self.prediction_key: self.summary.scalar(self.desc + "_top_1", top_1_accuracy, epoch, step) self.summary.scalar(self.desc + "_top_{}".format(self.top_k), top_k_accuracy, epoch, step) if self.save_summary: if (step + 1) % self.save_summary_steps == 0: self.summary.save() return callback
[ "oneflow.env.machine", "oneflow.env.ctrl_port", "oneflow.train.CheckPoint" ]
[((988, 1013), 'oneflow.env.ctrl_port', 'flow.env.ctrl_port', (['(12138)'], {}), '(12138)\n', (1006, 1013), True, 'import oneflow as flow\n'), ((1173, 1196), 'oneflow.env.machine', 'flow.env.machine', (['nodes'], {}), '(nodes)\n', (1189, 1196), True, 'import oneflow as flow\n'), ((8007, 8030), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (8028, 8030), True, 'import oneflow as flow\n'), ((9130, 9234), 'pandas.DataFrame', 'pd.DataFrame', (["{'epoch': epoch, 'iter': step, 'legend': legend, 'value': value, 'rank': 0}"], {'index': '[0]'}), "({'epoch': epoch, 'iter': step, 'legend': legend, 'value':\n value, 'rank': 0}, index=[0])\n", (9142, 9234), True, 'import pandas as pd\n'), ((9280, 9330), 'pandas.concat', 'pd.concat', (['[self._metrics, df]'], {'axis': '(0)', 'sort': '(False)'}), '([self._metrics, df], axis=0, sort=False)\n', (9289, 9330), True, 'import pandas as pd\n'), ((9372, 9415), 'os.path.join', 'os.path.join', (['self._log_dir', 'self._filename'], {}), '(self._log_dir, self._filename)\n', (9384, 9415), False, 'import os\n'), ((9581, 9592), 'time.time', 'time.time', ([], {}), '()\n', (9590, 9592), False, 'import time\n'), ((9671, 9682), 'time.time', 'time.time', ([], {}), '()\n', (9680, 9682), False, 'import time\n'), ((9824, 9835), 'time.time', 'time.time', ([], {}), '()\n', (9833, 9835), False, 'import time\n'), ((1358, 1387), 'os.path.isdir', 'os.path.isdir', (['model_load_dir'], {}), '(model_load_dir)\n', (1371, 1387), False, 'import os\n'), ((1550, 1582), 'util.model_weights.modelWeight.load', 'modelWeight.load', (['of_weight_path'], {}), '(of_weight_path)\n', (1566, 1582), False, 'from util.model_weights import modelWeight\n'), ((2166, 2195), 'os.path.isdir', 'os.path.isdir', (['model_load_dir'], {}), '(model_load_dir)\n', (2179, 2195), False, 'import os\n'), ((2332, 2364), 'util.model_weights.modelWeight.load', 'modelWeight.load', (['of_weight_path'], {}), '(of_weight_path)\n', (2348, 2364), False, 'from util.model_weights import modelWeight\n'), ((4294, 4329), 'os.path.exists', 'os.path.exists', (['args.train_data_dir'], {}), '(args.train_data_dir)\n', (4308, 4329), False, 'import os\n'), ((5263, 5297), 'util.ofrecord_util.load_synthetic', 'ofrecord_util.load_synthetic', (['args'], {}), '(args)\n', (5291, 5297), True, 'import util.ofrecord_util as ofrecord_util\n'), ((6461, 6495), 'util.ofrecord_util.load_synthetic', 'ofrecord_util.load_synthetic', (['args'], {}), '(args)\n', (6489, 6495), True, 'import util.ofrecord_util as ofrecord_util\n'), ((8077, 8106), 'os.path.isdir', 'os.path.isdir', (['model_load_dir'], {}), '(model_load_dir)\n', (8090, 8106), False, 'import os\n'), ((8494, 8528), 'os.path.exists', 'os.path.exists', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (8508, 8528), False, 'import os\n'), ((8542, 8573), 'os.makedirs', 'os.makedirs', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (8553, 8573), False, 'import os\n'), ((8860, 8883), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (8874, 8883), False, 'import os\n'), ((8885, 8905), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (8896, 8905), False, 'import os\n'), ((4480, 4526), 'util.ofrecord_util.load_imagenet_for_training', 'ofrecord_util.load_imagenet_for_training', (['args'], {}), '(args)\n', (4520, 4526), True, 'import util.ofrecord_util as ofrecord_util\n'), ((5377, 5410), 'os.path.exists', 'os.path.exists', (['args.val_data_dir'], {}), '(args.val_data_dir)\n', (5391, 5410), False, 'import os\n'), ((6348, 6382), 
'util.ofrecord_util.load_synthetic', 'ofrecord_util.load_synthetic', (['args'], {}), '(args)\n', (6376, 6382), True, 'import util.ofrecord_util as ofrecord_util\n'), ((4640, 4683), 'util.ofrecord_util.load_cifar_for_training', 'ofrecord_util.load_cifar_for_training', (['args'], {}), '(args)\n', (4677, 4683), True, 'import util.ofrecord_util as ofrecord_util\n'), ((5559, 5607), 'util.ofrecord_util.load_imagenet_for_validation', 'ofrecord_util.load_imagenet_for_validation', (['args'], {}), '(args)\n', (5601, 5607), True, 'import util.ofrecord_util as ofrecord_util\n'), ((4795, 4838), 'util.ofrecord_util.load_mnist_for_training', 'ofrecord_util.load_mnist_for_training', (['args'], {}), '(args)\n', (4832, 4838), True, 'import util.ofrecord_util as ofrecord_util\n'), ((5721, 5764), 'util.ofrecord_util.load_cifar_for_training', 'ofrecord_util.load_cifar_for_training', (['args'], {}), '(args)\n', (5758, 5764), True, 'import util.ofrecord_util as ofrecord_util\n'), ((4917, 4959), 'util.ofrecord_util.load_svhn_for_training', 'ofrecord_util.load_svhn_for_training', (['args'], {}), '(args)\n', (4953, 4959), True, 'import util.ofrecord_util as ofrecord_util\n'), ((5876, 5921), 'util.ofrecord_util.load_mnist_for_validation', 'ofrecord_util.load_mnist_for_validation', (['args'], {}), '(args)\n', (5915, 5921), True, 'import util.ofrecord_util as ofrecord_util\n'), ((13488, 13500), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (13495, 13500), False, 'import os\n'), ((5040, 5074), 'util.ofrecord_util.load_synthetic', 'ofrecord_util.load_synthetic', (['args'], {}), '(args)\n', (5068, 5074), True, 'import util.ofrecord_util as ofrecord_util\n'), ((5128, 5172), 'util.ofrecord_util.load_mydata_for_training', 'ofrecord_util.load_mydata_for_training', (['args'], {}), '(args)\n', (5166, 5172), True, 'import util.ofrecord_util as ofrecord_util\n'), ((6000, 6044), 'util.ofrecord_util.load_svhn_for_validation', 'ofrecord_util.load_svhn_for_validation', (['args'], {}), '(args)\n', (6038, 6044), True, 'import util.ofrecord_util as ofrecord_util\n'), ((14161, 14220), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""snapshot_initial_model"""'], {}), "(args.model_save_dir, 'snapshot_initial_model')\n", (14173, 14220), False, 'import os\n'), ((15140, 15175), 'os.path.exists', 'os.path.exists', (['args.model_load_dir'], {}), '(args.model_load_dir)\n', (15154, 15175), False, 'import os\n'), ((16031, 16069), 'os.path.exists', 'os.path.exists', (['args.before_result_dir'], {}), '(args.before_result_dir)\n', (16045, 16069), False, 'import os\n'), ((16103, 16138), 'os.makedirs', 'os.makedirs', (['args.before_result_dir'], {}), '(args.before_result_dir)\n', (16114, 16138), False, 'import os\n'), ((16279, 16302), 'json.dump', 'json.dump', (['save_dict', 'f'], {}), '(save_dict, f)\n', (16288, 16302), False, 'import json\n'), ((16397, 16434), 'os.path.exists', 'os.path.exists', (['args.after_result_dir'], {}), '(args.after_result_dir)\n', (16411, 16434), False, 'import os\n'), ((16468, 16502), 'os.makedirs', 'os.makedirs', (['args.after_result_dir'], {}), '(args.after_result_dir)\n', (16479, 16502), False, 'import os\n'), ((16642, 16665), 'json.dump', 'json.dump', (['save_dict', 'f'], {}), '(save_dict, f)\n', (16651, 16665), False, 'import json\n'), ((6125, 6159), 'util.ofrecord_util.load_synthetic', 'ofrecord_util.load_synthetic', (['args'], {}), '(args)\n', (6153, 6159), True, 'import util.ofrecord_util as ofrecord_util\n'), ((6213, 6257), 'util.ofrecord_util.load_mydata_for_training', 
'ofrecord_util.load_mydata_for_training', (['args'], {}), '(args)\n', (6251, 6257), True, 'import util.ofrecord_util as ofrecord_util\n'), ((14291, 14350), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""snapshot_initial_model"""'], {}), "(args.model_save_dir, 'snapshot_initial_model')\n", (14303, 14350), False, 'import os\n'), ((14399, 14449), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""snapshot_last"""'], {}), "(args.model_save_dir, 'snapshot_last')\n", (14411, 14449), False, 'import os\n'), ((16177, 16234), 'os.path.join', 'os.path.join', (['args.before_result_dir', '"""results_eval.json"""'], {}), "(args.before_result_dir, 'results_eval.json')\n", (16189, 16234), False, 'import os\n'), ((16541, 16597), 'os.path.join', 'os.path.join', (['args.after_result_dir', '"""results_eval.json"""'], {}), "(args.after_result_dir, 'results_eval.json')\n", (16553, 16597), False, 'import os\n'), ((14520, 14570), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""snapshot_last"""'], {}), "(args.model_save_dir, 'snapshot_last')\n", (14532, 14570), False, 'import os\n'), ((14619, 14672), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""snapshot_epoch_0"""'], {}), "(args.model_save_dir, 'snapshot_epoch_0')\n", (14631, 14672), False, 'import os\n'), ((13791, 13815), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (13803, 13815), False, 'import os\n'), ((14743, 14796), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""snapshot_epoch_0"""'], {}), "(args.model_save_dir, 'snapshot_epoch_0')\n", (14755, 14796), False, 'import os\n')]
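A note on the Metric callback in the record above: match_top_k turns a batch of logits into top-1/top-k hit counts via np.argpartition. Below is a minimal, self-contained numpy sketch of the same trick, adapted to plain numpy arrays (the record first calls .numpy() on OneFlow blobs); the toy predictions and labels are invented for illustration.

import numpy as np

def match_top_k(predictions, labels, top_k=1):
    # indices of the top_k largest scores per row; argpartition leaves
    # them unordered, which is fine for a set-membership test
    max_k_preds = np.argpartition(predictions, -top_k, axis=-1)[:, -top_k:]
    match_array = np.logical_or.reduce(max_k_preds == labels.reshape((-1, 1)), axis=1)
    return match_array.sum(), match_array.shape[0]

preds = np.array([[0.10, 0.70, 0.10, 0.05, 0.05],   # top-1 = class 1
                  [0.30, 0.20, 0.40, 0.05, 0.05],   # top-1 = class 2, top-2 = {0, 2}
                  [0.50, 0.20, 0.15, 0.10, 0.05],   # top-1 = class 0
                  [0.05, 0.05, 0.10, 0.10, 0.70]])  # top-1 = class 4
labels = np.array([1, 0, 0, 4])
print(match_top_k(preds, labels, top_k=1))  # (3, 4): 3 of 4 top-1 hits
print(match_top_k(preds, labels, top_k=2))  # (4, 4): all labels within top-2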
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import numpy as np from oneflow.compatible import single_client as flow from oneflow.compatible.single_client import typing as oft # @flow.unittest.skip_unless_1n2d() # TODO(zhangwenxiao, jiangxuefei): refine in multi-client @unittest.skipIf(True, "skip for now because of single-client tensor_list removed") class TestDynamicReshape(flow.unittest.TestCase): def test_dynamic_reshape(test_case): data_shape = (10, 10, 10) flow.config.gpu_device_num(2) func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) func_config.default_logical_view(flow.scope.mirrored_view()) @flow.global_function(type="train", function_config=func_config) def DynamicReshapeJob(x: oft.ListNumpy.Placeholder(data_shape)): reshape_out1 = flow.reshape(x, (-1, 20)) my_model = flow.get_variable( "my_model", shape=(20, 32), dtype=flow.float, initializer=flow.random_uniform_initializer(minval=-10, maxval=10), trainable=True, ) my_model = flow.cast_to_current_logical_view(my_model) mm_out = flow.matmul(reshape_out1, my_model) reshape_out2 = flow.reshape(mm_out, (-1, 8, 4)) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0 ).minimize(reshape_out2) return reshape_out1 data = [np.random.rand(*data_shape).astype(np.float32) for i in range(2)] out = DynamicReshapeJob(data).get().numpy_list() for i in range(2): test_case.assertTrue(np.array_equal(np.reshape(data[i], (50, 20)), out[i])) if __name__ == "__main__": unittest.main()
[ "oneflow.compatible.single_client.scope.mirrored_view", "oneflow.compatible.single_client.matmul", "oneflow.compatible.single_client.FunctionConfig", "oneflow.compatible.single_client.config.gpu_device_num", "oneflow.compatible.single_client.reshape", "oneflow.compatible.single_client.cast_to_current_logical_view", "oneflow.compatible.single_client.typing.ListNumpy.Placeholder", "oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler", "oneflow.compatible.single_client.random_uniform_initializer", "oneflow.compatible.single_client.global_function" ]
[((834, 920), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now because of single-client tensor_list removed"""'], {}), "(True,\n 'skip for now because of single-client tensor_list removed')\n", (849, 920), False, 'import unittest\n'), ((2364, 2379), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2377, 2379), False, 'import unittest\n'), ((1050, 1079), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (1076, 1079), True, 'from oneflow.compatible import single_client as flow\n'), ((1102, 1123), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1121, 1123), True, 'from oneflow.compatible import single_client as flow\n'), ((1253, 1316), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1273, 1316), True, 'from oneflow.compatible import single_client as flow\n'), ((1215, 1241), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1239, 1241), True, 'from oneflow.compatible import single_client as flow\n'), ((1417, 1442), 'oneflow.compatible.single_client.reshape', 'flow.reshape', (['x', '(-1, 20)'], {}), '(x, (-1, 20))\n', (1429, 1442), True, 'from oneflow.compatible import single_client as flow\n'), ((1732, 1775), 'oneflow.compatible.single_client.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['my_model'], {}), '(my_model)\n', (1765, 1775), True, 'from oneflow.compatible import single_client as flow\n'), ((1797, 1832), 'oneflow.compatible.single_client.matmul', 'flow.matmul', (['reshape_out1', 'my_model'], {}), '(reshape_out1, my_model)\n', (1808, 1832), True, 'from oneflow.compatible import single_client as flow\n'), ((1860, 1892), 'oneflow.compatible.single_client.reshape', 'flow.reshape', (['mm_out', '(-1, 8, 4)'], {}), '(mm_out, (-1, 8, 4))\n', (1872, 1892), True, 'from oneflow.compatible import single_client as flow\n'), ((1350, 1387), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['data_shape'], {}), '(data_shape)\n', (1375, 1387), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1607, 1661), 'oneflow.compatible.single_client.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(-10)', 'maxval': '(10)'}), '(minval=-10, maxval=10)\n', (1638, 1661), True, 'from oneflow.compatible import single_client as flow\n'), ((2093, 2120), 'numpy.random.rand', 'np.random.rand', (['*data_shape'], {}), '(*data_shape)\n', (2107, 2120), True, 'import numpy as np\n'), ((2291, 2320), 'numpy.reshape', 'np.reshape', (['data[i]', '(50, 20)'], {}), '(data[i], (50, 20))\n', (2301, 2320), True, 'import numpy as np\n'), ((1941, 1996), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (1982, 1996), True, 'from oneflow.compatible import single_client as flow\n')]
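For reference, the shape arithmetic this test verifies against numpy: a -1 passed to reshape is inferred from the remaining element count, so each (10, 10, 10) mirrored slice becomes (50, 20), and the (50, 32) matmul output becomes (50, 8, 4). A numpy-only sketch of the same arithmetic:

import numpy as np

x = np.random.rand(10, 10, 10).astype(np.float32)   # 1000 elements
h = x.reshape(-1, 20)                                # -1 inferred as 1000 / 20 = 50
w = np.random.uniform(-10, 10, (20, 32)).astype(np.float32)
out = (h @ w).reshape(-1, 8, 4)                      # 50 * 32 = 1600 elements -> (50, 8, 4)
print(h.shape, out.shape)                            # (50, 20) (50, 8, 4)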
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np from test_util import GenArgList from optimizer_test_util import clip_grad_norm_np import oneflow as flow def compare_with_numpy_lamb( test_case, device, x_shape, learning_rate, train_iters, betas, weight_decay, eps, do_bias_correction, adam_w_mode, clip_grad_max_norm, clip_grad_norm_type, ): np.random.seed(1000) random_grad_seq = [] for _ in range(train_iters): random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32)) init_value = np.random.uniform(size=x_shape).astype(np.float32) class CustomModule(flow.nn.Module): def __init__(self): super().__init__() self.param = flow.nn.Parameter( flow.Tensor(init_value, device=flow.device(device)) ) def forward(self, mask): return self.param * mask simp_module = CustomModule() simp_module.to(device) simp_module.train() optim_kwargs = { "params": simp_module.parameters(), "lr": learning_rate, "betas": betas, "eps": eps, "weight_decay": weight_decay, "adam_w_mode": adam_w_mode, "do_bias_correction": do_bias_correction, } if clip_grad_max_norm != -1: optim_kwargs["clip_grad_max_norm"] = clip_grad_max_norm optim_kwargs["clip_grad_norm_type"] = clip_grad_norm_type lamb_optim = flow.optim.LAMB([optim_kwargs]) class CustomLambGraph(flow.nn.Graph): def __init__(self): super().__init__() self.m = simp_module self.add_optimizer(lamb_optim) def build(self, mask_tensor): loss = flow.sum(self.m(mask_tensor)) loss.backward() return loss lamb_graph = CustomLambGraph() for i in range(train_iters): mask_tensor = flow.tensor( random_grad_seq[i], dtype=flow.float32, requires_grad=False, device=flow.device(device), ) lamb_graph(mask_tensor) of_res = simp_module.param.numpy() def train_by_numpy(): x = init_value mt = np.zeros_like(x) vt = np.zeros_like(x) beta1 = betas[0] beta2 = betas[1] if adam_w_mode: l2 = 0 wd = weight_decay else: l2 = weight_decay wd = 0 def np_train_one_iter(step, grad): if clip_grad_max_norm != -1: _, grad = clip_grad_norm_np( grad, clip_grad_max_norm, clip_grad_norm_type ) grad = grad + l2 * x bias_correction1 = 1.0 bias_correction2 = 1.0 if do_bias_correction: bias_correction1 = 1.0 - np.power(beta1, step + 1) bias_correction2 = 1.0 - np.power(beta2, step + 1) m = beta1 * mt + (1 - beta1) * grad v = beta2 * vt + (1 - beta2) * grad * grad denom = np.sqrt(v) / np.sqrt(bias_correction2) + eps adam_diff = m / bias_correction1 / denom w_norm = np.linalg.norm(x, ord=2) g_norm = np.linalg.norm(adam_diff, ord=2) if w_norm > 0 and g_norm > 0: trust_ratio = w_norm / g_norm else: trust_ratio = 1.0 param = x - learning_rate * trust_ratio * (adam_diff + wd * x) return (param, m, v) for i in range(train_iters): (x, mt, vt) = np_train_one_iter(i, random_grad_seq[i]) return x np_res = train_by_numpy() test_case.assertTrue( np.allclose(of_res.flatten(), np_res.flatten(), rtol=1e-3, atol=1e-3) ) @flow.unittest.skip_unless_1n1d() class TestLamb(flow.unittest.TestCase): def test_lamb(test_case): arg_dict = 
OrderedDict() arg_dict["device"] = ["cpu", "cuda"] arg_dict["x_shape"] = [(10,)] arg_dict["learning_rate"] = [0.1, 1e-3] arg_dict["train_iters"] = [10] arg_dict["betas"] = [(0.99, 0.9)] arg_dict["weight_decay"] = [0.001, 0.1] arg_dict["eps"] = [1e-8, 1e-6] arg_dict["do_bias_correction"] = [True, False] arg_dict["adam_w_mode"] = [True, False] # NOTE(l1aoxingyu): max_norm = -1 means no clip grad # nn.Graph only support `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0` arg_dict["clip_grad_max_norm"] = [-1, 1.0] arg_dict["clip_grad_norm_type"] = [2.0] for arg in GenArgList(arg_dict): compare_with_numpy_lamb(test_case, *arg) if __name__ == "__main__": unittest.main()
[ "oneflow.optim.LAMB", "oneflow.device", "oneflow.unittest.skip_unless_1n1d" ]
[((4376, 4408), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4406, 4408), True, 'import oneflow as flow\n'), ((1012, 1032), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (1026, 1032), True, 'import numpy as np\n'), ((2076, 2107), 'oneflow.optim.LAMB', 'flow.optim.LAMB', (['[optim_kwargs]'], {}), '([optim_kwargs])\n', (2091, 2107), True, 'import oneflow as flow\n'), ((5295, 5310), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5308, 5310), False, 'import unittest\n'), ((2813, 2829), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2826, 2829), True, 'import numpy as np\n'), ((2843, 2859), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2856, 2859), True, 'import numpy as np\n'), ((4498, 4511), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4509, 4511), False, 'from collections import OrderedDict\n'), ((5187, 5207), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5197, 5207), False, 'from test_util import GenArgList\n'), ((1192, 1223), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1209, 1223), True, 'import numpy as np\n'), ((3781, 3805), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'ord': '(2)'}), '(x, ord=2)\n', (3795, 3805), True, 'import numpy as np\n'), ((3827, 3859), 'numpy.linalg.norm', 'np.linalg.norm', (['adam_diff'], {'ord': '(2)'}), '(adam_diff, ord=2)\n', (3841, 3859), True, 'import numpy as np\n'), ((2647, 2666), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2658, 2666), True, 'import oneflow as flow\n'), ((3157, 3221), 'optimizer_test_util.clip_grad_norm_np', 'clip_grad_norm_np', (['grad', 'clip_grad_max_norm', 'clip_grad_norm_type'], {}), '(grad, clip_grad_max_norm, clip_grad_norm_type)\n', (3174, 3221), False, 'from optimizer_test_util import clip_grad_norm_np\n'), ((1123, 1154), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1140, 1154), True, 'import numpy as np\n'), ((3442, 3467), 'numpy.power', 'np.power', (['beta1', '(step + 1)'], {}), '(beta1, step + 1)\n', (3450, 3467), True, 'import numpy as np\n'), ((3509, 3534), 'numpy.power', 'np.power', (['beta2', '(step + 1)'], {}), '(beta2, step + 1)\n', (3517, 3534), True, 'import numpy as np\n'), ((3660, 3670), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (3667, 3670), True, 'import numpy as np\n'), ((3673, 3698), 'numpy.sqrt', 'np.sqrt', (['bias_correction2'], {}), '(bias_correction2)\n', (3680, 3698), True, 'import numpy as np\n'), ((1434, 1453), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1445, 1453), True, 'import oneflow as flow\n')]
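The numpy reference in train_by_numpy is the core of this test. Condensed here into a standalone single-step function; the defaults mirror one point of the test grid (adam_w-style decoupled weight decay, bias correction on, no gradient clipping) and are otherwise arbitrary.

import numpy as np

def lamb_step(x, grad, m, v, step, lr=1e-3, beta1=0.99, beta2=0.9, eps=1e-8, wd=0.001):
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    bc1 = 1.0 - beta1 ** (step + 1)                  # bias corrections
    bc2 = 1.0 - beta2 ** (step + 1)
    adam_diff = (m / bc1) / (np.sqrt(v) / np.sqrt(bc2) + eps)
    w_norm = np.linalg.norm(x, ord=2)
    g_norm = np.linalg.norm(adam_diff, ord=2)
    # the layer-wise trust ratio is what distinguishes LAMB from Adam(W)
    trust_ratio = w_norm / g_norm if w_norm > 0 and g_norm > 0 else 1.0
    x = x - lr * trust_ratio * (adam_diff + wd * x)  # decoupled weight decay
    return x, m, v

x = np.random.uniform(size=(10,)).astype(np.float32)
m, v = np.zeros_like(x), np.zeros_like(x)
for step in range(10):
    grad = np.random.uniform(size=x.shape).astype(np.float32)
    x, m, v = lamb_step(x, grad, m, v, step)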
""" @author: <NAME> <<EMAIL>> """ import os import sys import numpy as np import soundfile as sf import oneflow as flow import oneflow.nn as nn import oneflow.optim as optim from model.dnn_models import MLP from model.SincNet import SincNet as CNN from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd # Reading cfg file options = read_conf() # [data] tr_lst = options.tr_lst te_lst = options.te_lst class_dict_file = options.lab_dict data_folder = options.data_folder + "/" output_folder = options.output_folder # [windowing] fs = int(options.fs) cw_len = int(options.cw_len) cw_shift = int(options.cw_shift) # [cnn] cnn_N_filt = list(map(int, options.cnn_N_filt.split(","))) cnn_len_filt = list(map(int, options.cnn_len_filt.split(","))) cnn_max_pool_len = list(map(int, options.cnn_max_pool_len.split(","))) cnn_use_laynorm_inp = str_to_bool(options.cnn_use_laynorm_inp) cnn_use_batchnorm_inp = str_to_bool(options.cnn_use_batchnorm_inp) cnn_use_laynorm = list(map(str_to_bool, options.cnn_use_laynorm.split(","))) cnn_use_batchnorm = list(map(str_to_bool, options.cnn_use_batchnorm.split(","))) cnn_act = list(map(str, options.cnn_act.split(","))) cnn_drop = list(map(float, options.cnn_drop.split(","))) # [dnn] fc_lay = list(map(int, options.fc_lay.split(","))) fc_drop = list(map(float, options.fc_drop.split(","))) fc_use_laynorm_inp = str_to_bool(options.fc_use_laynorm_inp) fc_use_batchnorm_inp = str_to_bool(options.fc_use_batchnorm_inp) fc_use_batchnorm = list(map(str_to_bool, options.fc_use_batchnorm.split(","))) fc_use_laynorm = list(map(str_to_bool, options.fc_use_laynorm.split(","))) fc_act = list(map(str, options.fc_act.split(","))) # [class] class_lay = list(map(int, options.class_lay.split(","))) class_drop = list(map(float, options.class_drop.split(","))) class_use_laynorm_inp = str_to_bool(options.class_use_laynorm_inp) class_use_batchnorm_inp = str_to_bool(options.class_use_batchnorm_inp) class_use_batchnorm = list(map(str_to_bool, options.class_use_batchnorm.split(","))) class_use_laynorm = list(map(str_to_bool, options.class_use_laynorm.split(","))) class_act = list(map(str, options.class_act.split(","))) # [optimization] lr = float(options.lr) batch_size = int(options.batch_size) N_epochs = int(options.N_epochs) N_batches = int(options.N_batches) N_eval_epoch = int(options.N_eval_epoch) seed = int(options.seed) # training list wav_lst_tr = ReadList(tr_lst) snt_tr = len(wav_lst_tr) # test list wav_lst_te = ReadList(te_lst) snt_te = len(wav_lst_te) # Folder creation try: os.stat(output_folder) except: os.mkdir(output_folder) # setting seed np.random.seed(seed) # loss function cost = nn.NLLLoss() cost.to("cuda") # Converting context and shift in samples wlen = int(fs * cw_len / 1000.00) # 3200 wshift = int(fs * cw_shift / 1000.00) # Batch_dev Batch_dev = 128 # Feature extractor CNN CNN_arch = { "input_dim": wlen, "fs": fs, "cnn_N_filt": cnn_N_filt, "cnn_len_filt": cnn_len_filt, "cnn_max_pool_len": cnn_max_pool_len, "cnn_use_laynorm_inp": cnn_use_laynorm_inp, "cnn_use_batchnorm_inp": cnn_use_batchnorm_inp, "cnn_use_laynorm": cnn_use_laynorm, "cnn_use_batchnorm": cnn_use_batchnorm, "cnn_act": cnn_act, "cnn_drop": cnn_drop, } CNN_net = CNN(CNN_arch) CNN_net.to("cuda") # Loading label dictionary lab_dict = np.load(class_dict_file, allow_pickle=True).item() DNN1_arch = { "input_dim": CNN_net.out_dim, "fc_lay": fc_lay, "fc_drop": fc_drop, "fc_use_batchnorm": fc_use_batchnorm, "fc_use_laynorm": fc_use_laynorm, "fc_use_laynorm_inp": fc_use_laynorm_inp, 
"fc_use_batchnorm_inp": fc_use_batchnorm_inp, "fc_act": fc_act, } DNN1_net = MLP(DNN1_arch) DNN1_net.to("cuda") DNN2_arch = { "input_dim": fc_lay[-1], "fc_lay": class_lay, "fc_drop": class_drop, "fc_use_batchnorm": class_use_batchnorm, "fc_use_laynorm": class_use_laynorm, "fc_use_laynorm_inp": class_use_laynorm_inp, "fc_use_batchnorm_inp": class_use_batchnorm_inp, "fc_act": class_act, } DNN2_net = MLP(DNN2_arch) DNN2_net.to("cuda") optimizer_CNN = optim.RMSprop(CNN_net.parameters(), lr=lr, alpha=0.95, eps=1e-6) optimizer_DNN1 = optim.RMSprop(DNN1_net.parameters(), lr=lr, alpha=0.95, eps=1e-6,) optimizer_DNN2 = optim.RMSprop(DNN2_net.parameters(), lr=lr, alpha=0.95, eps=1e-6) for epoch in range(N_epochs): CNN_net.train() DNN1_net.train() DNN2_net.train() loss_sum = 0 err_sum = 0 for i in range(N_batches): [inp, lab] = create_batches_rnd( batch_size, data_folder, wav_lst_tr, snt_tr, wlen, lab_dict, 0.2 ) pout = DNN2_net(DNN1_net(CNN_net(inp))) pred = flow.argmax(pout, dim=1) loss = cost(pout, lab.long()) if np.isnan(loss.numpy()): print("epoch: ", epoch, " batch: ", i, " isnan: True.") exit(0) err = np.mean(pred.numpy() != lab.long().numpy()) loss.backward() nn.utils.clip_grad_norm_(CNN_net.parameters(), 0.2) nn.utils.clip_grad_norm_(DNN1_net.parameters(), 0.2) nn.utils.clip_grad_norm_(DNN2_net.parameters(), 0.2) optimizer_CNN.step() optimizer_DNN1.step() optimizer_DNN2.step() optimizer_CNN.zero_grad() optimizer_DNN1.zero_grad() optimizer_DNN2.zero_grad() loss_sum = loss_sum + loss.detach() err_sum = err_sum + err loss_tot = loss_sum / N_batches err_tot = err_sum / N_batches # Full Validation new if epoch % N_eval_epoch == 0: CNN_net.eval() DNN1_net.eval() DNN2_net.eval() loss_sum = 0 err_sum = 0 err_sum_snt = 0 with flow.no_grad(): for i in range(snt_te): [signal, fs] = sf.read(data_folder + wav_lst_te[i]) signal = flow.Tensor(signal).to("cuda") lab_batch = lab_dict[wav_lst_te[i].lower()] # split signals into chunks beg_samp = 0 end_samp = wlen N_fr = int((signal.shape[0] - wlen) / (wshift)) sig_arr = flow.zeros((Batch_dev, wlen), dtype=flow.float32).to("cuda") lab = (flow.zeros(N_fr + 1) + lab_batch).to("cuda").long() pout = flow.zeros((N_fr + 1, class_lay[-1]), dtype=flow.float32).to( "cuda" ) count_fr = 0 count_fr_tot = 0 while end_samp < signal.shape[0]: sig_arr[count_fr, :] = signal[beg_samp:end_samp] beg_samp = beg_samp + wshift end_samp = beg_samp + wlen count_fr = count_fr + 1 count_fr_tot = count_fr_tot + 1 if count_fr == Batch_dev: inp = flow.Tensor(sig_arr).to(sig_arr.device) pout[count_fr_tot - Batch_dev : count_fr_tot, :] = DNN2_net( DNN1_net(CNN_net(inp)) ) count_fr = 0 sig_arr = flow.zeros((Batch_dev, wlen), dtype=flow.float32).to( "cuda" ) if count_fr > 0: inp = flow.Tensor(sig_arr[0:count_fr]).to(sig_arr.device) pout[count_fr_tot - count_fr : count_fr_tot, :] = DNN2_net( DNN1_net(CNN_net(inp)) ) pred = flow.argmax(pout, dim=1) loss = cost(pout, lab.long()) err = np.mean(pred.numpy() != lab.long().numpy()) best_class = flow.argmax(flow.sum(pout, dim=0), dim=0) err_sum_snt = err_sum_snt + (best_class.numpy() != lab[0].numpy()) loss_sum = loss_sum + loss.detach() err_sum = err_sum + err err_tot_dev_snt = err_sum_snt / snt_te loss_tot_dev = loss_sum / snt_te err_tot_dev = err_sum / snt_te print( "epoch %i, loss_tr=%f err_tr=%f loss_te=%f err_te=%f err_te_snt=%f" % ( epoch, loss_tot.numpy(), err_tot, loss_tot_dev.numpy(), err_tot_dev, err_tot_dev_snt, ) ) with open(output_folder + "/res.res", "a") as res_file: res_file.write( "epoch %i, loss_tr=%f err_tr=%f loss_te=%f err_te=%f 
err_te_snt=%f\n" % ( epoch, loss_tot.numpy(), err_tot, loss_tot_dev.numpy(), err_tot_dev, err_tot_dev_snt, ) ) flow.save( CNN_net.state_dict(), os.path.join(output_folder, "CNN_epoch_%d" % epoch) ) flow.save( DNN1_net.state_dict(), os.path.join(output_folder, "DNN1_epoch_%d" % epoch) ) flow.save( DNN2_net.state_dict(), os.path.join(output_folder, "DNN2_epoch_%d" % epoch) ) else: print("epoch %i, loss_tr=%f err_tr=%f" % (epoch, loss_tot.numpy(), err_tot))
[ "oneflow.argmax", "oneflow.sum", "oneflow.zeros", "oneflow.Tensor", "oneflow.nn.NLLLoss", "oneflow.no_grad" ]
[((363, 374), 'utils.data_utils.read_conf', 'read_conf', ([], {}), '()\n', (372, 374), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((868, 908), 'utils.data_utils.str_to_bool', 'str_to_bool', (['options.cnn_use_laynorm_inp'], {}), '(options.cnn_use_laynorm_inp)\n', (879, 908), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((933, 975), 'utils.data_utils.str_to_bool', 'str_to_bool', (['options.cnn_use_batchnorm_inp'], {}), '(options.cnn_use_batchnorm_inp)\n', (944, 975), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((1380, 1419), 'utils.data_utils.str_to_bool', 'str_to_bool', (['options.fc_use_laynorm_inp'], {}), '(options.fc_use_laynorm_inp)\n', (1391, 1419), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((1443, 1484), 'utils.data_utils.str_to_bool', 'str_to_bool', (['options.fc_use_batchnorm_inp'], {}), '(options.fc_use_batchnorm_inp)\n', (1454, 1484), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((1843, 1885), 'utils.data_utils.str_to_bool', 'str_to_bool', (['options.class_use_laynorm_inp'], {}), '(options.class_use_laynorm_inp)\n', (1854, 1885), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((1912, 1956), 'utils.data_utils.str_to_bool', 'str_to_bool', (['options.class_use_batchnorm_inp'], {}), '(options.class_use_batchnorm_inp)\n', (1923, 1956), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((2422, 2438), 'utils.data_utils.ReadList', 'ReadList', (['tr_lst'], {}), '(tr_lst)\n', (2430, 2438), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((2490, 2506), 'utils.data_utils.ReadList', 'ReadList', (['te_lst'], {}), '(te_lst)\n', (2498, 2506), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((2635, 2655), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2649, 2655), True, 'import numpy as np\n'), ((2680, 2692), 'oneflow.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (2690, 2692), True, 'import oneflow.nn as nn\n'), ((3289, 3302), 'model.SincNet.SincNet', 'CNN', (['CNN_arch'], {}), '(CNN_arch)\n', (3292, 3302), True, 'from model.SincNet import SincNet as CNN\n'), ((3719, 3733), 'model.dnn_models.MLP', 'MLP', (['DNN1_arch'], {}), '(DNN1_arch)\n', (3722, 3733), False, 'from model.dnn_models import MLP\n'), ((4077, 4091), 'model.dnn_models.MLP', 'MLP', (['DNN2_arch'], {}), '(DNN2_arch)\n', (4080, 4091), False, 'from model.dnn_models import MLP\n'), ((2560, 2582), 'os.stat', 'os.stat', (['output_folder'], {}), '(output_folder)\n', (2567, 2582), False, 'import os\n'), ((2595, 2618), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (2603, 2618), False, 'import os\n'), ((3361, 3404), 'numpy.load', 'np.load', (['class_dict_file'], {'allow_pickle': '(True)'}), '(class_dict_file, allow_pickle=True)\n', (3368, 3404), True, 'import numpy as np\n'), ((4541, 4629), 'utils.data_utils.create_batches_rnd', 'create_batches_rnd', (['batch_size', 'data_folder', 'wav_lst_tr', 'snt_tr', 'wlen', 'lab_dict', '(0.2)'], {}), '(batch_size, data_folder, wav_lst_tr, snt_tr, wlen,\n lab_dict, 0.2)\n', (4559, 4629), False, 'from utils.data_utils import ReadList, read_conf, str_to_bool, create_batches_rnd\n'), ((4712, 4736), 'oneflow.argmax', 'flow.argmax', 
(['pout'], {'dim': '(1)'}), '(pout, dim=1)\n', (4723, 4736), True, 'import oneflow as flow\n'), ((5721, 5735), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (5733, 5735), True, 'import oneflow as flow\n'), ((8879, 8930), 'os.path.join', 'os.path.join', (['output_folder', "('CNN_epoch_%d' % epoch)"], {}), "(output_folder, 'CNN_epoch_%d' % epoch)\n", (8891, 8930), False, 'import os\n'), ((8995, 9047), 'os.path.join', 'os.path.join', (['output_folder', "('DNN1_epoch_%d' % epoch)"], {}), "(output_folder, 'DNN1_epoch_%d' % epoch)\n", (9007, 9047), False, 'import os\n'), ((9112, 9164), 'os.path.join', 'os.path.join', (['output_folder', "('DNN2_epoch_%d' % epoch)"], {}), "(output_folder, 'DNN2_epoch_%d' % epoch)\n", (9124, 9164), False, 'import os\n'), ((5804, 5840), 'soundfile.read', 'sf.read', (['(data_folder + wav_lst_te[i])'], {}), '(data_folder + wav_lst_te[i])\n', (5811, 5840), True, 'import soundfile as sf\n'), ((7544, 7568), 'oneflow.argmax', 'flow.argmax', (['pout'], {'dim': '(1)'}), '(pout, dim=1)\n', (7555, 7568), True, 'import oneflow as flow\n'), ((7724, 7745), 'oneflow.sum', 'flow.sum', (['pout'], {'dim': '(0)'}), '(pout, dim=0)\n', (7732, 7745), True, 'import oneflow as flow\n'), ((5867, 5886), 'oneflow.Tensor', 'flow.Tensor', (['signal'], {}), '(signal)\n', (5878, 5886), True, 'import oneflow as flow\n'), ((6156, 6205), 'oneflow.zeros', 'flow.zeros', (['(Batch_dev, wlen)'], {'dtype': 'flow.float32'}), '((Batch_dev, wlen), dtype=flow.float32)\n', (6166, 6205), True, 'import oneflow as flow\n'), ((6315, 6372), 'oneflow.zeros', 'flow.zeros', (['(N_fr + 1, class_lay[-1])'], {'dtype': 'flow.float32'}), '((N_fr + 1, class_lay[-1]), dtype=flow.float32)\n', (6325, 6372), True, 'import oneflow as flow\n'), ((7319, 7351), 'oneflow.Tensor', 'flow.Tensor', (['sig_arr[0:count_fr]'], {}), '(sig_arr[0:count_fr])\n', (7330, 7351), True, 'import oneflow as flow\n'), ((6871, 6891), 'oneflow.Tensor', 'flow.Tensor', (['sig_arr'], {}), '(sig_arr)\n', (6882, 6891), True, 'import oneflow as flow\n'), ((7144, 7193), 'oneflow.zeros', 'flow.zeros', (['(Batch_dev, wlen)'], {'dtype': 'flow.float32'}), '((Batch_dev, wlen), dtype=flow.float32)\n', (7154, 7193), True, 'import oneflow as flow\n'), ((6240, 6260), 'oneflow.zeros', 'flow.zeros', (['(N_fr + 1)'], {}), '(N_fr + 1)\n', (6250, 6260), True, 'import oneflow as flow\n')]
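The evaluation loop in the SincNet record above slices each test utterance into overlapping wlen-sample chunks advanced by wshift samples before batching them through the networks. A vectorized numpy sketch of that framing; fs=16000 and cw_len=200 ms / cw_shift=10 ms are assumed cfg values, consistent with the '# 3200' comment in the script.

import numpy as np

fs, cw_len, cw_shift = 16000, 200, 10              # assumed cfg values (Hz, ms, ms)
wlen = int(fs * cw_len / 1000.0)                   # 3200-sample window
wshift = int(fs * cw_shift / 1000.0)               # 160-sample hop

signal = np.random.randn(fs).astype(np.float32)    # 1 s of stand-in audio
n_fr = (len(signal) - wlen) // wshift + 1          # same count as the script's N_fr + 1
frames = np.stack([signal[i * wshift : i * wshift + wlen] for i in range(n_fr)])
print(frames.shape)                                # (81, 3200)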
import argparse import os import sys import glob import time import math import numpy as np import psutil import oneflow as flow import oneflow.nn as nn from petastorm.reader import make_batch_reader sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) num_dense_fields = 13 num_sparse_fields = 26 def get_args(print_args=True): def int_list(x): return list(map(int, x.split(","))) def str_list(x): return list(map(str, x.split(","))) parser = argparse.ArgumentParser() parser.add_argument("--data_dir", type=str, required=True) parser.add_argument( "--num_train_samples", type=int, default=36672493, help="the number of training samples" ) parser.add_argument( "--num_valid_samples", type=int, default=4584062, help="the number of validation samples" ) parser.add_argument( "--num_test_samples", type=int, default=4584062, help="the number of test samples" ) parser.add_argument("--shard_seed", type=int, default=2022) parser.add_argument("--model_load_dir", type=str, default=None) parser.add_argument("--model_save_dir", type=str, default=None) parser.add_argument("--save_best_model", action="store_true", help="save best model or not") parser.add_argument( "--save_initial_model", action="store_true", help="save initial model parameters or not." ) parser.add_argument( "--save_model_after_each_eval", action="store_true", help="save model after each eval." ) parser.add_argument("--embedding_vec_size", type=int, default=16) parser.add_argument("--batch_norm", type=bool, default=False) parser.add_argument("--dnn_hidden_units", type=int_list, default="1000,1000,1000,1000,1000") parser.add_argument("--crossing_layers", type=int, default=3) parser.add_argument("--net_dropout", type=float, default=0.2) parser.add_argument("--embedding_regularizer", type=float, default=None) parser.add_argument("--net_regularizer", type=float, default=None) parser.add_argument( "--disable_early_stop", action="store_true", help="enable early stop or not" ) parser.add_argument("--patience", type=int, default=2) parser.add_argument("--min_delta", type=float, default=1.0e-6) parser.add_argument("--lr_factor", type=float, default=0.1) parser.add_argument("--min_lr", type=float, default=1.0e-6) parser.add_argument("--learning_rate", type=float, default=0.001) parser.add_argument("--size_factor", type=int, default=3) parser.add_argument("--valid_batch_size", type=int, default=10000) parser.add_argument("--valid_batches", type=int, default=1000, help="number of valid batches") parser.add_argument("--test_batch_size", type=int, default=10000) parser.add_argument("--test_batches", type=int, default=1000, help="number of test batches") parser.add_argument("--train_batch_size", type=int, default=10000) parser.add_argument("--train_batches", type=int, default=15000, help="number of train batches") parser.add_argument("--loss_print_interval", type=int, default=100) parser.add_argument( "--table_size_array", type=int_list, help="Embedding table size array for sparse fields", required=True, ) parser.add_argument( "--persistent_path", type=str, required=True, help="path for persistent kv store" ) parser.add_argument("--store_type", type=str, default="cached_host_mem") parser.add_argument("--cache_memory_budget_mb", type=int, default=8192) parser.add_argument("--amp", action="store_true", help="Run model with amp") parser.add_argument("--loss_scale_policy", type=str, default="static", help="static or dynamic") args = parser.parse_args() if print_args and flow.env.get_rank() == 0: _print_args(args) return args 
def _print_args(args): """Print arguments.""" print("------------------------ arguments ------------------------", flush=True) str_list = [] for arg in vars(args): dots = "." * (48 - len(arg)) str_list.append(" {} {} {}".format(arg, dots, getattr(args, arg))) for arg in sorted(str_list, key=lambda x: x.lower()): print(arg, flush=True) print("-------------------- end of arguments ---------------------", flush=True) class DCNDataReader(object): """A context manager that manages the creation and termination of a :class:`petastorm.Reader`. """ def __init__( self, parquet_file_url_list, batch_size, num_epochs=1, shuffle_row_groups=True, shard_seed=2019, shard_count=1, cur_shard=0, ): self.parquet_file_url_list = parquet_file_url_list self.batch_size = batch_size self.num_epochs = num_epochs self.shuffle_row_groups = shuffle_row_groups self.shard_seed = shard_seed self.shard_count = shard_count self.cur_shard = cur_shard fields = ["Label"] fields += [f"I{i+1}" for i in range(num_dense_fields)] fields += [f"C{i+1}" for i in range(num_sparse_fields)] self.fields = fields self.num_fields = len(fields) def __enter__(self): self.reader = make_batch_reader( self.parquet_file_url_list, workers_count=2, shuffle_row_groups=self.shuffle_row_groups, num_epochs=self.num_epochs, shard_seed=self.shard_seed, shard_count=self.shard_count, cur_shard=self.cur_shard, ) self.loader = self.get_batches(self.reader) return self.loader def __exit__(self, exc_type, exc_value, exc_traceback): self.reader.stop() self.reader.join() def get_batches(self, reader, batch_size=None): if batch_size is None: batch_size = self.batch_size tail = None for rg in reader: rgdict = rg._asdict() rglist = [rgdict[field] for field in self.fields] pos = 0 if tail is not None: pos = batch_size - len(tail[0]) tail = list( [ np.concatenate((tail[i], rglist[i][0 : (batch_size - len(tail[i]))])) for i in range(self.num_fields) ] ) if len(tail[0]) == batch_size: label = tail[0] features = tail[1 : self.num_fields] tail = None yield label, np.stack(features, axis=-1) else: pos = 0 continue while (pos + batch_size) <= len(rglist[0]): label = rglist[0][pos : pos + batch_size] features = [rglist[j][pos : pos + batch_size] for j in range(1, self.num_fields)] pos += batch_size yield label, np.stack(features, axis=-1) if pos != len(rglist[0]): tail = [rglist[i][pos:] for i in range(self.num_fields)] def make_criteo_dataloader(data_path, batch_size, shuffle=True, shard_seed=2022): """Make a Criteo Parquet DataLoader. :return: a context manager; when the returned context manager exits, the reader will be closed. 
""" files = ["file://" + name for name in glob.glob(f"{data_path}/*.parquet")] files.sort() world_size = flow.env.get_world_size() batch_size_per_proc = batch_size // world_size return DCNDataReader( files, batch_size_per_proc, None, # TODO: iterate over all eval dataset shuffle_row_groups=shuffle, shard_seed=shard_seed, shard_count=world_size, cur_shard=flow.env.get_rank(), ) class OneEmbedding(nn.Module): def __init__( self, table_name, embedding_vec_size, persistent_path, table_size_array, store_type, cache_memory_budget_mb, size_factor, ): assert table_size_array is not None vocab_size = sum(table_size_array) tables = [ flow.one_embedding.make_table( flow.one_embedding.make_normal_initializer(mean=0.0, std=1e-4) ) for _ in range(len(table_size_array)) ] if store_type == "device_mem": store_options = flow.one_embedding.make_device_mem_store_options( persistent_path=persistent_path, capacity=vocab_size, size_factor=size_factor, ) elif store_type == "cached_host_mem": assert cache_memory_budget_mb > 0 store_options = flow.one_embedding.make_cached_host_mem_store_options( cache_budget_mb=cache_memory_budget_mb, persistent_path=persistent_path, capacity=vocab_size, size_factor=size_factor, ) elif store_type == "cached_ssd": assert cache_memory_budget_mb > 0 store_options = flow.one_embedding.make_cached_ssd_store_options( cache_budget_mb=cache_memory_budget_mb, persistent_path=persistent_path, capacity=vocab_size, size_factor=size_factor, ) else: raise NotImplementedError("not support", store_type) super(OneEmbedding, self).__init__() self.one_embedding = flow.one_embedding.MultiTableEmbedding( name=table_name, embedding_dim=embedding_vec_size, dtype=flow.float, key_type=flow.int64, tables=tables, store_options=store_options, ) def forward(self, ids): return self.one_embedding.forward(ids) class CrossInteractionLayer(nn.Module): ''' Follow the same CrossInteractionLayer implementation of FuxiCTR ''' def __init__(self, input_dim): super(CrossInteractionLayer, self).__init__() self.weight = nn.Linear(input_dim, 1, bias=False) self.bias = nn.Parameter(flow.zeros(input_dim)) def forward(self, X_0, X_i): interaction_out = self.weight(X_i) * X_0 + self.bias return interaction_out class CrossNet(nn.Module): ''' Follow the same CrossNet implementation of FuxiCTR ''' def __init__(self, input_dim, num_layers): super(CrossNet, self).__init__() self.num_layers = num_layers self.cross_net = nn.ModuleList( CrossInteractionLayer(input_dim) for _ in range(self.num_layers) ) def forward(self, X_0): X_i = X_0 # b x dim for i in range(self.num_layers): X_i = X_i + self.cross_net[i](X_0, X_i) return X_i class DNN(nn.Module): def __init__( self, input_dim, hidden_units=[], dropout_rates=0, batch_norm=False, use_bias=True, ): super(DNN, self).__init__() dense_layers = [] hidden_units = [input_dim] + hidden_units for idx in range(len(hidden_units) - 1): dense_layers.append(nn.Linear(hidden_units[idx], hidden_units[idx + 1], bias=use_bias)) dense_layers.append(nn.ReLU()) if batch_norm: dense_layers.append(nn.BatchNorm1d(hidden_units[idx + 1])) if dropout_rates > 0: dense_layers.append(nn.Dropout(p=dropout_rates)) self.dnn = nn.Sequential(*dense_layers) # * used to unpack list def forward(self, inputs): return self.dnn(inputs) class DCNModule(nn.Module): def __init__( self, embedding_vec_size, persistent_path, table_size_array, one_embedding_store_type, cache_memory_budget_mb, size_factor, dnn_hidden_units=[128, 128], crossing_layers=3, net_dropout=0.2, batch_norm=False, ): super(DCNModule, self).__init__() 
        self.embedding_layer = OneEmbedding(
            table_name="sparse_embedding",
            embedding_vec_size=embedding_vec_size,
            persistent_path=persistent_path,
            table_size_array=table_size_array,
            store_type=one_embedding_store_type,
            cache_memory_budget_mb=cache_memory_budget_mb,
            size_factor=size_factor,
        )
        input_dim = embedding_vec_size * (num_dense_fields + num_sparse_fields)
        self.dnn = (
            DNN(
                input_dim=input_dim,
                hidden_units=dnn_hidden_units,
                dropout_rates=net_dropout,
                batch_norm=batch_norm,
                use_bias=True,
            )
            if dnn_hidden_units
            else None
        )  # in case of only crossing net used
        self.crossnet = CrossNet(input_dim, crossing_layers)
        final_dim = input_dim
        if isinstance(dnn_hidden_units, list) and len(dnn_hidden_units) > 0:  # if use dnn
            final_dim += dnn_hidden_units[-1]
        self.fc = nn.Linear(final_dim, 1)  # [cross_part, dnn_part] -> logit
        self.reset_parameters()

    def forward(self, X):
        feature_emb = self.embedding_layer(X)
        flat_feature_emb = feature_emb.flatten(start_dim=1)
        cross_out = self.crossnet(flat_feature_emb)
        if self.dnn is not None:
            dnn_out = self.dnn(flat_feature_emb)
            final_out = flow.cat([cross_out, dnn_out], dim=-1)
        else:
            final_out = cross_out
        y_pred = self.fc(final_out)
        return y_pred.sigmoid()

    def reset_parameters(self):
        def reset_param(m):
            if type(m) == nn.Linear:
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.fill_(0)

        self.apply(reset_param)


def make_dcn_module(args):
    model = DCNModule(
        embedding_vec_size=args.embedding_vec_size,
        persistent_path=args.persistent_path,
        table_size_array=args.table_size_array,
        one_embedding_store_type=args.store_type,
        cache_memory_budget_mb=args.cache_memory_budget_mb,
        dnn_hidden_units=args.dnn_hidden_units,
        crossing_layers=args.crossing_layers,
        net_dropout=args.net_dropout,
        batch_norm=args.batch_norm,
        size_factor=args.size_factor,
    )
    return model


class DCNValGraph(flow.nn.Graph):
    def __init__(self, dcn_module, amp=False):
        super(DCNValGraph, self).__init__()
        self.module = dcn_module
        if amp:
            self.config.enable_amp(True)

    def build(self, features):
        predicts = self.module(features.to("cuda"))
        return predicts


class DCNTrainGraph(flow.nn.Graph):
    def __init__(
        self, dcn_module, loss, optimizer, lr_scheduler=None, grad_scaler=None, amp=False,
    ):
        super(DCNTrainGraph, self).__init__()
        self.module = dcn_module
        self.loss = loss
        self.add_optimizer(optimizer, lr_sch=lr_scheduler)
        self.config.allow_fuse_model_update_ops(True)
        self.config.allow_fuse_add_to_output(True)
        self.config.allow_fuse_cast_scale(True)
        if amp:
            self.config.enable_amp(True)
            self.set_grad_scaler(grad_scaler)

    def build(self, labels, features):
        logits = self.module(features.to("cuda")).squeeze()
        loss = self.loss(logits, labels.squeeze().to("cuda"))
        reduce_loss = flow.mean(loss)
        reduce_loss.backward()
        return reduce_loss.to("cpu")


def make_lr_scheduler(args, optimizer):
    batches_per_epoch = math.ceil(args.num_train_samples / args.train_batch_size)
    multistep_lr = flow.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[3 * batches_per_epoch], gamma=args.lr_factor
    )
    return multistep_lr


def train(args):
    rank = flow.env.get_rank()

    dcn_module = make_dcn_module(args)
    dcn_module.to_global(flow.env.all_device_placement("cuda"), flow.sbp.broadcast)

    def load_model(dir):
        if rank == 0:
            print(f"Loading model from {dir}")
        if os.path.exists(dir):
            state_dict = flow.load(dir, global_src_rank=0)
            dcn_module.load_state_dict(state_dict, strict=False)
        else:
            if rank == 0:
                print(f"Loading model from {dir} failed: invalid path")

    if args.model_load_dir:
        load_model(args.model_load_dir)

    def save_model(subdir):
        if not args.model_save_dir:
            return
        save_path = os.path.join(args.model_save_dir, subdir)
        if rank == 0:
            print(f"Saving model to {save_path}")
        state_dict = dcn_module.state_dict()
        flow.save(state_dict, save_path, global_dst_rank=0)

    if args.save_initial_model:
        save_model("initial_checkpoint")

    def get_metrics(logs):
        kv = {"auc": 1, "logloss": -1}
        monitor_value = 0
        for k, v in kv.items():
            monitor_value += logs.get(k, 0) * v
        return monitor_value

    def early_stop(
        epoch, monitor_value, best_metric, stopping_steps, patience=2, min_delta=1e-6,
    ):
        rank = flow.env.get_rank()
        save_best = False
        stop_training = False
        if monitor_value < best_metric + min_delta:
            stopping_steps += 1
            if rank == 0:
                print("Monitor(max) STOP: {:.6f}!".format(monitor_value))
        else:
            stopping_steps = 0
            best_metric = monitor_value
            save_best = True
        if stopping_steps >= patience:
            stop_training = True
            if rank == 0:
                print(f"Early stopping at epoch={epoch}!")
        return stop_training, best_metric, stopping_steps, save_best

    opt = flow.optim.Adam(dcn_module.parameters(), lr=args.learning_rate)
    lr_scheduler = None
    loss_func = flow.nn.BCELoss(reduction="none").to("cuda")

    if args.loss_scale_policy == "static":
        grad_scaler = flow.amp.StaticGradScaler(1024)
    else:
        grad_scaler = flow.amp.GradScaler(
            init_scale=1073741824,
            growth_factor=2.0,
            backoff_factor=0.5,
            growth_interval=2000,
        )

    eval_graph = DCNValGraph(dcn_module, args.amp)
    train_graph = DCNTrainGraph(dcn_module, loss_func, opt, lr_scheduler, grad_scaler, args.amp)

    batches_per_epoch = math.ceil(args.num_train_samples / args.train_batch_size)

    best_metric = -np.inf
    epoch = 0
    stopping_steps = 0
    stop_training = False

    cached_valid_batches = prefetch_eval_batches(
        f"{args.data_dir}/val", args.valid_batch_size, args.valid_batches
    )

    with make_criteo_dataloader(
        f"{args.data_dir}/train", args.train_batch_size, shard_seed=args.shard_seed
    ) as loader:
        dcn_module.train()
        last_step, last_time = 0, time.time()
        for step in range(1, args.train_batches + 1):
            labels, features = batch_to_global(*next(loader))
            loss = train_graph(labels, features)
            if step % args.loss_print_interval == 0:
                loss = loss.numpy()
                if rank == 0:
                    latency = (time.time() - last_time) / (step - last_step)
                    throughput = args.train_batch_size / latency
                    last_step, last_time = step, time.time()
                    strtime = time.strftime("%Y-%m-%d %H:%M:%S")
                    print(
                        f"Rank[{rank}], Step {step}, Loss {loss:0.4f}, "
                        + f"Latency {(latency * 1000):0.3f} ms, Throughput {throughput:0.1f}, {strtime}"
                    )

            if step % batches_per_epoch == 0:
                epoch += 1
                val_auc, val_logloss = eval(
                    args,
                    eval_graph,
                    tag="val",
                    cur_step=step,
                    epoch=epoch,
                    cached_eval_batches=cached_valid_batches,
                )
                if args.save_model_after_each_eval:
                    save_model(f"step_{step}_val_auc_{val_auc:0.5f}")

                monitor_value = get_metrics(logs={"auc": val_auc, "logloss": val_logloss})
                stop_training, best_metric, stopping_steps, save_best = early_stop(
                    epoch,
                    monitor_value,
                    best_metric=best_metric,
                    stopping_steps=stopping_steps,
                    patience=args.patience,
                    min_delta=args.min_delta,
                )
                if args.save_best_model and save_best:
                    if rank == 0:
                        print(f"Save best model: monitor(max): {best_metric:.6f}")
                    save_model("best_checkpoint")
                if not args.disable_early_stop and stop_training:
                    break
                dcn_module.train()
                last_time = time.time()

    if args.save_best_model:
        load_model(f"{args.model_save_dir}/best_checkpoint")
    if rank == 0:
        print("================ Test Evaluation ================")
    eval(args, eval_graph, tag="test", cur_step=step, epoch=epoch)


def _np_to_global(np_array):
    t = flow.from_numpy(np_array)
    return t.to_global(placement=flow.env.all_device_placement("cpu"), sbp=flow.sbp.split(0))


def batch_to_global(np_label, np_features, is_train=True):
    labels = _np_to_global(np_label.reshape(-1, 1)) if is_train else np_label.reshape(-1, 1)
    features = _np_to_global(np_features)
    return labels, features


def prefetch_eval_batches(data_dir, batch_size, num_batches):
    cached_eval_batches = []
    with make_criteo_dataloader(data_dir, batch_size, shuffle=False) as loader:
        for _ in range(num_batches):
            label, features = batch_to_global(*next(loader), is_train=False)
            cached_eval_batches.append((label, features))
    return cached_eval_batches


def eval(args, eval_graph, tag="val", cur_step=0, epoch=0, cached_eval_batches=None):
    if tag == "val":
        batches_per_epoch = math.ceil(args.num_valid_samples / args.valid_batch_size)
        batch_size = args.valid_batch_size
    else:
        batches_per_epoch = math.ceil(args.num_test_samples / args.test_batch_size)
        batch_size = args.test_batch_size

    eval_graph.module.eval()
    labels, preds = [], []
    eval_start_time = time.time()
    if cached_eval_batches is None:
        with make_criteo_dataloader(f"{args.data_dir}/{tag}", batch_size, shuffle=False) as loader:
            eval_start_time = time.time()
            for i in range(batches_per_epoch):
                label, features = batch_to_global(*next(loader), is_train=False)
                pred = eval_graph(features)
                labels.append(label)
                preds.append(pred.to_local())
    else:
        for i in range(batches_per_epoch):
            label, features = cached_eval_batches[i]
            pred = eval_graph(features)
            labels.append(label)
            preds.append(pred.to_local())

    labels = (
        _np_to_global(np.concatenate(labels, axis=0)).to_global(sbp=flow.sbp.broadcast()).to_local()
    )
    preds = (
        flow.cat(preds, dim=0)
        .to_global(placement=flow.env.all_device_placement("cpu"), sbp=flow.sbp.split(0))
        .to_global(sbp=flow.sbp.broadcast())
        .to_local()
    )

    flow.comm.barrier()
    eval_time = time.time() - eval_start_time

    rank = flow.env.get_rank()

    metrics_start_time = time.time()
    auc = flow.roc_auc_score(labels, preds).numpy()[0]
    logloss = flow._C.binary_cross_entropy_loss(preds, labels, weight=None, reduction="mean").item()
    metrics_time = time.time() - metrics_start_time

    if rank == 0:
        host_mem_mb = psutil.Process().memory_info().rss // (1024 * 1024)
        stream = os.popen("nvidia-smi --query-gpu=memory.used --format=csv")
        device_mem_str = stream.read().split("\n")[rank + 1]
        strtime = time.strftime("%Y-%m-%d %H:%M:%S")
        print(
            f"Rank[{rank}], Epoch {epoch}, Step {cur_step}, AUC {auc:0.6f}, LogLoss {logloss:0.6f}, "
            + f"Eval_time {eval_time:0.2f} s, Metrics_time {metrics_time:0.2f} s, Eval_samples {labels.shape[0]}, "
            + f"GPU_Memory {device_mem_str}, Host_Memory {host_mem_mb} MiB, {strtime}"
        )

    return auc, logloss


if __name__ == "__main__":
    os.system(sys.executable + " -m oneflow --doctor")
    flow.boxing.nccl.enable_all_to_all(True)
    args = get_args()
    train(args)
[ "oneflow.save", "oneflow.nn.BCELoss", "oneflow.one_embedding.make_device_mem_store_options", "oneflow.boxing.nccl.enable_all_to_all", "oneflow.sbp.broadcast", "oneflow._C.binary_cross_entropy_loss", "oneflow.env.all_device_placement", "oneflow.amp.GradScaler", "oneflow.nn.ReLU", "oneflow.optim.lr_scheduler.MultiStepLR", "oneflow.env.get_rank", "oneflow.mean", "oneflow.one_embedding.make_normal_initializer", "oneflow.one_embedding.MultiTableEmbedding", "oneflow.one_embedding.make_cached_host_mem_store_options", "oneflow.nn.Linear", "oneflow.nn.BatchNorm1d", "oneflow.one_embedding.make_cached_ssd_store_options", "oneflow.from_numpy", "oneflow.env.get_world_size", "oneflow.load", "oneflow.cat", "oneflow.amp.StaticGradScaler", "oneflow.roc_auc_score", "oneflow.nn.Dropout", "oneflow.sbp.split", "oneflow.comm.barrier", "oneflow.nn.init.xavier_normal_", "oneflow.zeros", "oneflow.nn.Sequential" ]
[((514, 539), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (537, 539), False, 'import argparse\n'), ((7464, 7489), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (7487, 7489), True, 'import oneflow as flow\n'), ((15591, 15648), 'math.ceil', 'math.ceil', (['(args.num_train_samples / args.train_batch_size)'], {}), '(args.num_train_samples / args.train_batch_size)\n', (15600, 15648), False, 'import math\n'), ((15668, 15776), 'oneflow.optim.lr_scheduler.MultiStepLR', 'flow.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': '[3 * batches_per_epoch]', 'gamma': 'args.lr_factor'}), '(optimizer, milestones=[3 *\n batches_per_epoch], gamma=args.lr_factor)\n', (15703, 15776), True, 'import oneflow as flow\n'), ((15841, 15860), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (15858, 15860), True, 'import oneflow as flow\n'), ((18336, 18393), 'math.ceil', 'math.ceil', (['(args.num_train_samples / args.train_batch_size)'], {}), '(args.num_train_samples / args.train_batch_size)\n', (18345, 18393), False, 'import math\n'), ((21197, 21222), 'oneflow.from_numpy', 'flow.from_numpy', (['np_array'], {}), '(np_array)\n', (21212, 21222), True, 'import oneflow as flow\n'), ((22370, 22381), 'time.time', 'time.time', ([], {}), '()\n', (22379, 22381), False, 'import time\n'), ((23371, 23390), 'oneflow.comm.barrier', 'flow.comm.barrier', ([], {}), '()\n', (23388, 23390), True, 'import oneflow as flow\n'), ((23449, 23468), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (23466, 23468), True, 'import oneflow as flow\n'), ((23495, 23506), 'time.time', 'time.time', ([], {}), '()\n', (23504, 23506), False, 'import time\n'), ((24388, 24438), 'os.system', 'os.system', (["(sys.executable + ' -m oneflow --doctor')"], {}), "(sys.executable + ' -m oneflow --doctor')\n", (24397, 24438), False, 'import os\n'), ((24443, 24483), 'oneflow.boxing.nccl.enable_all_to_all', 'flow.boxing.nccl.enable_all_to_all', (['(True)'], {}), '(True)\n', (24477, 24483), True, 'import oneflow as flow\n'), ((5274, 5505), 'petastorm.reader.make_batch_reader', 'make_batch_reader', (['self.parquet_file_url_list'], {'workers_count': '(2)', 'shuffle_row_groups': 'self.shuffle_row_groups', 'num_epochs': 'self.num_epochs', 'shard_seed': 'self.shard_seed', 'shard_count': 'self.shard_count', 'cur_shard': 'self.cur_shard'}), '(self.parquet_file_url_list, workers_count=2,\n shuffle_row_groups=self.shuffle_row_groups, num_epochs=self.num_epochs,\n shard_seed=self.shard_seed, shard_count=self.shard_count, cur_shard=\n self.cur_shard)\n', (5291, 5505), False, 'from petastorm.reader import make_batch_reader\n'), ((9470, 9652), 'oneflow.one_embedding.MultiTableEmbedding', 'flow.one_embedding.MultiTableEmbedding', ([], {'name': 'table_name', 'embedding_dim': 'embedding_vec_size', 'dtype': 'flow.float', 'key_type': 'flow.int64', 'tables': 'tables', 'store_options': 'store_options'}), '(name=table_name, embedding_dim=\n embedding_vec_size, dtype=flow.float, key_type=flow.int64, tables=\n tables, store_options=store_options)\n', (9508, 9652), True, 'import oneflow as flow\n'), ((10039, 10074), 'oneflow.nn.Linear', 'nn.Linear', (['input_dim', '(1)'], {'bias': '(False)'}), '(input_dim, 1, bias=False)\n', (10048, 10074), True, 'import oneflow.nn as nn\n'), ((11444, 11472), 'oneflow.nn.Sequential', 'nn.Sequential', (['*dense_layers'], {}), '(*dense_layers)\n', (11457, 11472), True, 'import oneflow.nn as nn\n'), ((13021, 13044), 'oneflow.nn.Linear', 'nn.Linear', (['final_dim', 
'(1)'], {}), '(final_dim, 1)\n', (13030, 13044), True, 'import oneflow.nn as nn\n'), ((15440, 15455), 'oneflow.mean', 'flow.mean', (['loss'], {}), '(loss)\n', (15449, 15455), True, 'import oneflow as flow\n'), ((15925, 15962), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (15954, 15962), True, 'import oneflow as flow\n'), ((16090, 16109), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (16104, 16109), False, 'import os\n'), ((16520, 16561), 'os.path.join', 'os.path.join', (['args.model_save_dir', 'subdir'], {}), '(args.model_save_dir, subdir)\n', (16532, 16561), False, 'import os\n'), ((16687, 16738), 'oneflow.save', 'flow.save', (['state_dict', 'save_path'], {'global_dst_rank': '(0)'}), '(state_dict, save_path, global_dst_rank=0)\n', (16696, 16738), True, 'import oneflow as flow\n'), ((17145, 17164), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (17162, 17164), True, 'import oneflow as flow\n'), ((17971, 18002), 'oneflow.amp.StaticGradScaler', 'flow.amp.StaticGradScaler', (['(1024)'], {}), '(1024)\n', (17996, 18002), True, 'import oneflow as flow\n'), ((18035, 18142), 'oneflow.amp.GradScaler', 'flow.amp.GradScaler', ([], {'init_scale': '(1073741824)', 'growth_factor': '(2.0)', 'backoff_factor': '(0.5)', 'growth_interval': '(2000)'}), '(init_scale=1073741824, growth_factor=2.0,\n backoff_factor=0.5, growth_interval=2000)\n', (18054, 18142), True, 'import oneflow as flow\n'), ((22054, 22111), 'math.ceil', 'math.ceil', (['(args.num_valid_samples / args.valid_batch_size)'], {}), '(args.num_valid_samples / args.valid_batch_size)\n', (22063, 22111), False, 'import math\n'), ((22193, 22248), 'math.ceil', 'math.ceil', (['(args.num_test_samples / args.test_batch_size)'], {}), '(args.num_test_samples / args.test_batch_size)\n', (22202, 22248), False, 'import math\n'), ((23407, 23418), 'time.time', 'time.time', ([], {}), '()\n', (23416, 23418), False, 'import time\n'), ((23682, 23693), 'time.time', 'time.time', ([], {}), '()\n', (23691, 23693), False, 'import time\n'), ((23825, 23884), 'os.popen', 'os.popen', (['"""nvidia-smi --query-gpu=memory.used --format=csv"""'], {}), "('nvidia-smi --query-gpu=memory.used --format=csv')\n", (23833, 23884), False, 'import os\n'), ((23965, 23999), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (23978, 23999), False, 'import time\n'), ((246, 271), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (261, 271), False, 'import os\n'), ((3813, 3832), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (3830, 3832), True, 'import oneflow as flow\n'), ((7392, 7427), 'glob.glob', 'glob.glob', (['f"""{data_path}/*.parquet"""'], {}), "(f'{data_path}/*.parquet')\n", (7401, 7427), False, 'import glob\n'), ((7782, 7801), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (7799, 7801), True, 'import oneflow as flow\n'), ((8423, 8555), 'oneflow.one_embedding.make_device_mem_store_options', 'flow.one_embedding.make_device_mem_store_options', ([], {'persistent_path': 'persistent_path', 'capacity': 'vocab_size', 'size_factor': 'size_factor'}), '(persistent_path=\n persistent_path, capacity=vocab_size, size_factor=size_factor)\n', (8471, 8555), True, 'import oneflow as flow\n'), ((10108, 10129), 'oneflow.zeros', 'flow.zeros', (['input_dim'], {}), '(input_dim)\n', (10118, 10129), True, 'import oneflow as flow\n'), ((13405, 13443), 'oneflow.cat', 'flow.cat', (['[cross_out, dnn_out]'], {'dim': '(-1)'}), 
'([cross_out, dnn_out], dim=-1)\n', (13413, 13443), True, 'import oneflow as flow\n'), ((16136, 16169), 'oneflow.load', 'flow.load', (['dir'], {'global_src_rank': '(0)'}), '(dir, global_src_rank=0)\n', (16145, 16169), True, 'import oneflow as flow\n'), ((17860, 17893), 'oneflow.nn.BCELoss', 'flow.nn.BCELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (17875, 17893), True, 'import oneflow as flow\n'), ((18811, 18822), 'time.time', 'time.time', ([], {}), '()\n', (18820, 18822), False, 'import time\n'), ((21256, 21292), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (21285, 21292), True, 'import oneflow as flow\n'), ((21298, 21315), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (21312, 21315), True, 'import oneflow as flow\n'), ((22549, 22560), 'time.time', 'time.time', ([], {}), '()\n', (22558, 22560), False, 'import time\n'), ((23576, 23655), 'oneflow._C.binary_cross_entropy_loss', 'flow._C.binary_cross_entropy_loss', (['preds', 'labels'], {'weight': 'None', 'reduction': '"""mean"""'}), "(preds, labels, weight=None, reduction='mean')\n", (23609, 23655), True, 'import oneflow as flow\n'), ((8219, 8283), 'oneflow.one_embedding.make_normal_initializer', 'flow.one_embedding.make_normal_initializer', ([], {'mean': '(0.0)', 'std': '(0.0001)'}), '(mean=0.0, std=0.0001)\n', (8261, 8283), True, 'import oneflow as flow\n'), ((8702, 8884), 'oneflow.one_embedding.make_cached_host_mem_store_options', 'flow.one_embedding.make_cached_host_mem_store_options', ([], {'cache_budget_mb': 'cache_memory_budget_mb', 'persistent_path': 'persistent_path', 'capacity': 'vocab_size', 'size_factor': 'size_factor'}), '(cache_budget_mb=\n cache_memory_budget_mb, persistent_path=persistent_path, capacity=\n vocab_size, size_factor=size_factor)\n', (8755, 8884), True, 'import oneflow as flow\n'), ((11113, 11179), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_units[idx]', 'hidden_units[idx + 1]'], {'bias': 'use_bias'}), '(hidden_units[idx], hidden_units[idx + 1], bias=use_bias)\n', (11122, 11179), True, 'import oneflow.nn as nn\n'), ((11213, 11222), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11220, 11222), True, 'import oneflow.nn as nn\n'), ((13674, 13706), 'oneflow.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight'], {}), '(m.weight)\n', (13696, 13706), True, 'import oneflow.nn as nn\n'), ((20904, 20915), 'time.time', 'time.time', ([], {}), '()\n', (20913, 20915), False, 'import time\n'), ((23517, 23550), 'oneflow.roc_auc_score', 'flow.roc_auc_score', (['labels', 'preds'], {}), '(labels, preds)\n', (23535, 23550), True, 'import oneflow as flow\n'), ((9069, 9246), 'oneflow.one_embedding.make_cached_ssd_store_options', 'flow.one_embedding.make_cached_ssd_store_options', ([], {'cache_budget_mb': 'cache_memory_budget_mb', 'persistent_path': 'persistent_path', 'capacity': 'vocab_size', 'size_factor': 'size_factor'}), '(cache_budget_mb=\n cache_memory_budget_mb, persistent_path=persistent_path, capacity=\n vocab_size, size_factor=size_factor)\n', (9117, 9246), True, 'import oneflow as flow\n'), ((11287, 11324), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_units[idx + 1]'], {}), '(hidden_units[idx + 1])\n', (11301, 11324), True, 'import oneflow.nn as nn\n'), ((11396, 11423), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_rates'}), '(p=dropout_rates)\n', (11406, 11423), True, 'import oneflow.nn as nn\n'), ((19341, 19375), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d 
%H:%M:%S')\n", (19354, 19375), False, 'import time\n'), ((23121, 23141), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (23139, 23141), True, 'import oneflow as flow\n'), ((23318, 23338), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (23336, 23338), True, 'import oneflow as flow\n'), ((6980, 7007), 'numpy.stack', 'np.stack', (['features'], {'axis': '(-1)'}), '(features, axis=-1)\n', (6988, 7007), True, 'import numpy as np\n'), ((19299, 19310), 'time.time', 'time.time', ([], {}), '()\n', (19308, 19310), False, 'import time\n'), ((23075, 23105), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (23089, 23105), True, 'import numpy as np\n'), ((23756, 23772), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (23770, 23772), False, 'import psutil\n'), ((6598, 6625), 'numpy.stack', 'np.stack', (['features'], {'axis': '(-1)'}), '(features, axis=-1)\n', (6606, 6625), True, 'import numpy as np\n'), ((19139, 19150), 'time.time', 'time.time', ([], {}), '()\n', (19148, 19150), False, 'import time\n'), ((23182, 23204), 'oneflow.cat', 'flow.cat', (['preds'], {'dim': '(0)'}), '(preds, dim=0)\n', (23190, 23204), True, 'import oneflow as flow\n'), ((23234, 23270), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (23263, 23270), True, 'import oneflow as flow\n'), ((23276, 23293), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (23290, 23293), True, 'import oneflow as flow\n')]
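# --- Editor's note (illustrative): CrossNet is defined earlier in this file,
# outside this excerpt. As a hedged sketch, a standard DCN cross layer computes
# x_{l+1} = x_0 * (x_l . w_l) + b_l + x_l, keeping the feature dimension fixed
# at every layer; the numpy helper below illustrates that recursion and is not
# the repository's actual implementation.
import numpy as np


def cross_layer_step(x0, xl, w, b):
    # x0, xl: (batch, dim); w, b: (dim,). (xl @ w) is a per-sample scalar,
    # so each layer only adds O(dim) parameters.
    return x0 * (xl @ w)[:, None] + b + xl


x0 = np.random.rand(2, 4).astype(np.float32)
out = cross_layer_step(
    x0, x0, np.random.rand(4).astype(np.float32), np.zeros(4, dtype=np.float32)
)
assert out.shape == x0.shape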
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict from typing import Tuple import numpy as np from oneflow.compatible import single_client as flow from oneflow.compatible.single_client import typing as tp from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type import os ninf = -float("inf") def _logsumexp(a, b): if a < b: a, b = b, a if b == ninf: return a else: return a + np.log(1 + np.exp(b - a)) def logsumexp(*args): res = args[0] for e in args[1:]: res = _logsumexp(res, e) return res def log_softmax(logits, axis=0): max_value = np.max(logits, axis, keepdims=True) exp = np.exp(logits - max_value) exp_sum = np.sum(exp, axis, keepdims=True) dist = exp / exp_sum return np.log(dist) def np_ctc_greedy_decoder(log_probs, input_lengths, merge_repeated=True): blank_label = log_probs.shape[2] - 1 decodes = np.zeros( (log_probs.shape[1], log_probs.shape[0]), dtype=input_lengths.dtype ) neg_sum_logits = np.zeros((input_lengths.size, 1), dtype=log_probs.dtype) for b in range(input_lengths.size): input_length = input_lengths[b] prev_indices = -1 t_dec = 0 for t in range(input_length): max_indice = np.argmax(log_probs[t, b, :]) neg_sum_logits[b, 0] -= log_probs[t, b, max_indice] if max_indice != blank_label and not ( merge_repeated and max_indice == prev_indices ): decodes[b, t_dec] = max_indice t_dec += 1 prev_indices = max_indice return decodes, neg_sum_logits def compare_with_np( device_type, device_num, data_type, max_input_length, batch_size, num_classes, merge_repeated, ): assert data_type in ["float32", "double"] assert device_type in ["gpu", "cpu"] assert merge_repeated in [False, True] flow.clear_default_session() if device_type == "cpu": flow.config.cpu_device_num(device_num) else: flow.config.gpu_device_num(device_num) flow_data_type = type_name_to_flow_type[data_type] np_data_type = type_name_to_np_type[data_type] func_config = flow.FunctionConfig() func_config.default_logical_view(flow.scope.consistent_view()) func_config.default_data_type(flow_data_type) func_config.default_placement_scope( flow.scope.placement(device_type, "0:0-{}".format(device_num - 1)) ) log_probs = np.random.random( size=(max_input_length, batch_size, num_classes) ).astype(np_data_type) log_probs = log_softmax(log_probs, axis=2) input_lengths = np.random.randint( max_input_length / 2, high=max_input_length, size=(batch_size,), dtype=np.int64 ) @flow.global_function(function_config=func_config) def ctc_greedy_decoder_job( log_probs: tp.Numpy.Placeholder( shape=(max_input_length, batch_size, num_classes), dtype=flow_data_type ), input_lengths: tp.Numpy.Placeholder(shape=(batch_size,), dtype=flow.int64), ) -> Tuple[tp.Numpy, tp.Numpy]: with flow.scope.placement(device_type, "0:0"): decoded, neg_sum_logits = flow.nn.ctc_greedy_decoder( log_probs, input_lengths, merge_repeated ) return decoded, neg_sum_logits of_decoded, of_neg_sum_logits = ctc_greedy_decoder_job(log_probs, input_lengths) np_decoded, np_neg_sum_logits = 
np_ctc_greedy_decoder( log_probs, input_lengths, merge_repeated ) np.allclose(of_decoded, np_decoded, atol=1e-5) np.allclose(of_neg_sum_logits, np_neg_sum_logits, atol=1e-5) def gen_arg_list(type): arg_dict = OrderedDict() if type == "1n2d": arg_dict["device_type"] = ["gpu"] arg_dict["device_num"] = [2] else: arg_dict["device_type"] = ["cpu", "gpu"] arg_dict["device_num"] = [1] arg_dict["data_type"] = ["float32"] arg_dict["max_input_length"] = [20] arg_dict["batch_size"] = [4] arg_dict["num_classes"] = [5] arg_dict["merge_repeated"] = [False, True] return GenArgList(arg_dict) @flow.unittest.skip_unless_1n1d() class TestCTCGreedyDecoder1n1d(flow.unittest.TestCase): def test_ctc_greedy_decoder(test_case): for arg in gen_arg_list("1n1d"): compare_with_np(*arg) @flow.unittest.skip_unless_1n2d() class TestCTCGreedyDecoder1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_ctc_greedy_decoder(test_case): for arg in gen_arg_list("1n2d"): compare_with_np(*arg) if __name__ == "__main__": unittest.main()
[ "oneflow.compatible.single_client.clear_default_session", "oneflow.compatible.single_client.nn.ctc_greedy_decoder", "oneflow.compatible.single_client.FunctionConfig", "oneflow.compatible.single_client.unittest.skip_unless_1n2d", "oneflow.compatible.single_client.typing.Numpy.Placeholder", "oneflow.compatible.single_client.unittest.skip_unless_1n1d", "oneflow.compatible.single_client.config.cpu_device_num", "oneflow.compatible.single_client.config.gpu_device_num", "oneflow.compatible.single_client.scope.placement", "oneflow.compatible.single_client.scope.consistent_view", "oneflow.compatible.single_client.global_function" ]
[((4753, 4785), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4783, 4785), True, 'from oneflow.compatible import single_client as flow\n'), ((4964, 4996), 'oneflow.compatible.single_client.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (4994, 4996), True, 'from oneflow.compatible import single_client as flow\n'), ((1221, 1256), 'numpy.max', 'np.max', (['logits', 'axis'], {'keepdims': '(True)'}), '(logits, axis, keepdims=True)\n', (1227, 1256), True, 'import numpy as np\n'), ((1267, 1293), 'numpy.exp', 'np.exp', (['(logits - max_value)'], {}), '(logits - max_value)\n', (1273, 1293), True, 'import numpy as np\n'), ((1308, 1340), 'numpy.sum', 'np.sum', (['exp', 'axis'], {'keepdims': '(True)'}), '(exp, axis, keepdims=True)\n', (1314, 1340), True, 'import numpy as np\n'), ((1377, 1389), 'numpy.log', 'np.log', (['dist'], {}), '(dist)\n', (1383, 1389), True, 'import numpy as np\n'), ((1521, 1598), 'numpy.zeros', 'np.zeros', (['(log_probs.shape[1], log_probs.shape[0])'], {'dtype': 'input_lengths.dtype'}), '((log_probs.shape[1], log_probs.shape[0]), dtype=input_lengths.dtype)\n', (1529, 1598), True, 'import numpy as np\n'), ((1634, 1690), 'numpy.zeros', 'np.zeros', (['(input_lengths.size, 1)'], {'dtype': 'log_probs.dtype'}), '((input_lengths.size, 1), dtype=log_probs.dtype)\n', (1642, 1690), True, 'import numpy as np\n'), ((2531, 2559), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2557, 2559), True, 'from oneflow.compatible import single_client as flow\n'), ((2817, 2838), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2836, 2838), True, 'from oneflow.compatible import single_client as flow\n'), ((3264, 3367), 'numpy.random.randint', 'np.random.randint', (['(max_input_length / 2)'], {'high': 'max_input_length', 'size': '(batch_size,)', 'dtype': 'np.int64'}), '(max_input_length / 2, high=max_input_length, size=(\n batch_size,), dtype=np.int64)\n', (3281, 3367), True, 'import numpy as np\n'), ((3383, 3432), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3403, 3432), True, 'from oneflow.compatible import single_client as flow\n'), ((4158, 4205), 'numpy.allclose', 'np.allclose', (['of_decoded', 'np_decoded'], {'atol': '(1e-05)'}), '(of_decoded, np_decoded, atol=1e-05)\n', (4169, 4205), True, 'import numpy as np\n'), ((4209, 4270), 'numpy.allclose', 'np.allclose', (['of_neg_sum_logits', 'np_neg_sum_logits'], {'atol': '(1e-05)'}), '(of_neg_sum_logits, np_neg_sum_logits, atol=1e-05)\n', (4220, 4270), True, 'import numpy as np\n'), ((4311, 4324), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4322, 4324), False, 'from collections import OrderedDict\n'), ((4729, 4749), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4739, 4749), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((5285, 5300), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5298, 5300), False, 'import unittest\n'), ((2597, 2635), 'oneflow.compatible.single_client.config.cpu_device_num', 'flow.config.cpu_device_num', (['device_num'], {}), '(device_num)\n', (2623, 2635), True, 'from oneflow.compatible import single_client as flow\n'), ((2654, 2692), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', 
(['device_num'], {}), '(device_num)\n', (2680, 2692), True, 'from oneflow.compatible import single_client as flow\n'), ((2876, 2904), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2902, 2904), True, 'from oneflow.compatible import single_client as flow\n'), ((5074, 5108), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (5083, 5108), False, 'import os\n'), ((1878, 1907), 'numpy.argmax', 'np.argmax', (['log_probs[t, b, :]'], {}), '(log_probs[t, b, :])\n', (1887, 1907), True, 'import numpy as np\n'), ((3095, 3161), 'numpy.random.random', 'np.random.random', ([], {'size': '(max_input_length, batch_size, num_classes)'}), '(size=(max_input_length, batch_size, num_classes))\n', (3111, 3161), True, 'import numpy as np\n'), ((3484, 3581), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': '(max_input_length, batch_size, num_classes)', 'dtype': 'flow_data_type'}), '(shape=(max_input_length, batch_size, num_classes),\n dtype=flow_data_type)\n', (3504, 3581), True, 'from oneflow.compatible.single_client import typing as tp\n'), ((3624, 3683), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': '(batch_size,)', 'dtype': 'flow.int64'}), '(shape=(batch_size,), dtype=flow.int64)\n', (3644, 3683), True, 'from oneflow.compatible.single_client import typing as tp\n'), ((3734, 3774), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (3754, 3774), True, 'from oneflow.compatible import single_client as flow\n'), ((3814, 3882), 'oneflow.compatible.single_client.nn.ctc_greedy_decoder', 'flow.nn.ctc_greedy_decoder', (['log_probs', 'input_lengths', 'merge_repeated'], {}), '(log_probs, input_lengths, merge_repeated)\n', (3840, 3882), True, 'from oneflow.compatible import single_client as flow\n'), ((1042, 1055), 'numpy.exp', 'np.exp', (['(b - a)'], {}), '(b - a)\n', (1048, 1055), True, 'import numpy as np\n')]
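# --- Illustrative usage (editor's addition): a tiny sanity check for
# np_ctc_greedy_decoder above; it assumes that function is in scope. With 3
# classes, index 2 is the blank label, and the repeated class-0 frame is
# collapsed because merge_repeated=True.
import numpy as np

demo_log_probs = np.log(np.array(
    [[[0.1, 0.1, 0.8]],   # t=0 -> blank, skipped
     [[0.8, 0.1, 0.1]],   # t=1 -> class 0
     [[0.8, 0.1, 0.1]],   # t=2 -> class 0, merged with t=1
     [[0.1, 0.8, 0.1]]],  # t=3 -> class 1
    dtype=np.float64))
demo_lengths = np.array([4], dtype=np.int64)
demo_decodes, _ = np_ctc_greedy_decoder(demo_log_probs, demo_lengths, merge_repeated=True)
# demo_decodes[0, :2] -> [0, 1]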
""" ELBO """ import oneflow.experimental as flow class ELBO(flow.nn.Module): def __init__(self, generator, variational): super(ELBO, self).__init__() self.generator = generator self.variational = variational def log_joint(self, nodes): log_joint_ = None for n_name in nodes.keys(): try: log_joint_ += nodes[n_name].log_prob() except: log_joint_ = nodes[n_name].log_prob() return log_joint_ def forward(self, observed, reduce_mean=True): nodes_q = self.variational(observed).nodes _v_inputs = {k:v.tensor for k,v in nodes_q.items()} _observed = {**_v_inputs, **observed} nodes_p = self.generator(_observed).nodes logpxz = self.log_joint(nodes_p) logqz = self.log_joint(nodes_q) if len(logqz.shape) > 0 and reduce_mean: elbo = flow.mean(logpxz - logqz) else: elbo = logpxz - logqz return -elbo
[ "oneflow.experimental.mean" ]
[((918, 943), 'oneflow.experimental.mean', 'flow.mean', (['(logpxz - logqz)'], {}), '(logpxz - logqz)\n', (927, 943), True, 'import oneflow.experimental as flow\n')]
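# --- Editor's note (illustrative, plain-numpy sketch; elbo_estimate is a
# hypothetical helper, not part of the module above). ELBO() computes
# E_q[log p(x, z) - log q(z|x)] from the two log-joints and returns its
# negation as a minimizable loss; the same single-sample Monte Carlo estimate
# in numpy:
import numpy as np


def elbo_estimate(logpxz, logqz):
    # logpxz, logqz: per-sample log p(x, z) and log q(z|x), shape (batch,)
    return np.mean(logpxz - logqz)


loss = -elbo_estimate(np.array([-3.0, -2.5]), np.array([-1.0, -1.2]))  # ~1.65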
import oneflow as flow
import os
import oneflow.nn as nn
import yaml
from model import AE
from data_utils import get_data_loader
from data_utils import PickleDataset
from utils import *
import time
import re
import shutil


class Solver(object):
    def __init__(self, config, args):
        # config stores the hyperparameter values, turned into attributes by AttrDict
        self.config = config
        print(config)

        # args stores other information
        self.args = args
        print(self.args)

        # Create save folder
        os.makedirs(self.args.store_model_path, exist_ok=True)

        # get dataloader
        self.get_data_loaders()

        # init the model with config
        self.build_model()
        self.save_config()

    def save_config(self):
        with open(f"{self.args.store_model_path}.config.yaml", "w") as f:
            yaml.dump(self.config, f)
        with open(f"{self.args.store_model_path}.args.yaml", "w") as f:
            yaml.dump(vars(self.args), f)
        return

    def get_data_loaders(self):
        data_dir = self.args.data_dir
        self.train_dataset = PickleDataset(
            os.path.join(data_dir, f"{self.args.train_set}.pkl"),
            os.path.join(data_dir, self.args.train_index_file),
            segment_size=self.config["data_loader"]["segment_size"],
        )
        self.train_loader = get_data_loader(
            self.train_dataset,
            frame_size=self.config["data_loader"]["frame_size"],
            batch_size=self.config["data_loader"]["batch_size"],
            shuffle=self.config["data_loader"]["shuffle"],
            num_workers=0,
            drop_last=False,
        )
        self.train_iter = infinite_iter(self.train_loader)
        return

    def build_model(self):
        # create model, discriminator, optimizers
        self.model = cc(AE(self.config))
        print(self.model)
        optimizer = self.config["optimizer"]
        self.opt = flow.optim.Adam(
            self.model.parameters(),
            lr=optimizer["lr"],
            betas=(optimizer["beta1"], optimizer["beta2"]),
            amsgrad=optimizer["amsgrad"],
            weight_decay=optimizer["weight_decay"],
        )
        return

    def ae_step(self, data, lambda_kl):
        x = cc(data)
        mu, log_sigma, emb, dec = self.model(x)
        criterion = nn.L1Loss()
        loss_rec = criterion(dec, x)
        loss_kl = 0.5 * flow.mean(
            flow.exp(log_sigma) + flow.mul(mu, mu) - 1 - log_sigma
        )
        loss = self.config["lambda"]["lambda_rec"] * loss_rec + lambda_kl * loss_kl
        self.opt.zero_grad()
        loss.backward()
        grad_norm = flow.nn.utils.clip_grad_norm_(
            self.model.parameters(), max_norm=self.config["optimizer"]["grad_norm"]
        )
        self.opt.step()
        meta = {
            "loss_rec": loss_rec.item(),
            "loss_kl": loss_kl.item(),
            "loss": loss.item(),
            "grad_norm": grad_norm,
        }
        return meta

    def train(self, n_iterations):
        start = time.time()
        for iteration in range(n_iterations):
            if iteration >= self.config["annealing_iters"]:
                lambda_kl = self.config["lambda"]["lambda_kl"]
            else:
                lambda_kl = (
                    self.config["lambda"]["lambda_kl"]
                    * (iteration + 1)
                    / self.config["annealing_iters"]
                )
            data = next(self.train_iter)
            meta = self.ae_step(data, lambda_kl)
            if iteration % self.args.summary_steps == 0:
                print(
                    "Iter {0} | loss_kl {1:.3f} | "
                    "loss_rec {2:.3f} | loss {3:.3f}".format(
                        iteration, meta["loss_kl"], meta["loss_rec"], meta["loss"],
                    ),
                    flush=True,
                )
            if (
                iteration + 1
            ) % self.args.save_steps == 0 or iteration + 1 == n_iterations:
                file_path = os.path.join(
                    self.args.store_model_path, "iteration%d.pth.tar" % (iteration + 1)
                )
                flow.save(self.model.state_dict(), file_path)
                print("Saving checkpoint model to %s" % file_path)
                for dirs in os.listdir(self.args.store_model_path):
                    dir_name = os.path.join(self.args.store_model_path, dirs)
                    dir = dir_name.split("/")[-1]
                    dir = re.findall(r"\d+", dir)
                    if dir == []:
                        dir = 100000000
                    else:
                        dir = int(dir[0])
                    if (iteration + 1) - dir >= 24999:
                        shutil.rmtree(dir_name)
        print("Train Time {0:.2f}s".format(time.time() - start))
        return
[ "oneflow.mul", "oneflow.nn.L1Loss", "oneflow.exp" ]
[((541, 595), 'os.makedirs', 'os.makedirs', (['self.args.store_model_path'], {'exist_ok': '(True)'}), '(self.args.store_model_path, exist_ok=True)\n', (552, 595), False, 'import os\n'), ((1367, 1601), 'data_utils.get_data_loader', 'get_data_loader', (['self.train_dataset'], {'frame_size': "self.config['data_loader']['frame_size']", 'batch_size': "self.config['data_loader']['batch_size']", 'shuffle': "self.config['data_loader']['shuffle']", 'num_workers': '(0)', 'drop_last': '(False)'}), "(self.train_dataset, frame_size=self.config['data_loader'][\n 'frame_size'], batch_size=self.config['data_loader']['batch_size'],\n shuffle=self.config['data_loader']['shuffle'], num_workers=0, drop_last\n =False)\n", (1382, 1601), False, 'from data_utils import get_data_loader\n'), ((2349, 2360), 'oneflow.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2358, 2360), True, 'import oneflow.nn as nn\n'), ((3064, 3075), 'time.time', 'time.time', ([], {}), '()\n', (3073, 3075), False, 'import time\n'), ((860, 885), 'yaml.dump', 'yaml.dump', (['self.config', 'f'], {}), '(self.config, f)\n', (869, 885), False, 'import yaml\n'), ((1142, 1194), 'os.path.join', 'os.path.join', (['data_dir', 'f"""{self.args.train_set}.pkl"""'], {}), "(data_dir, f'{self.args.train_set}.pkl')\n", (1154, 1194), False, 'import os\n'), ((1208, 1258), 'os.path.join', 'os.path.join', (['data_dir', 'self.args.train_index_file'], {}), '(data_dir, self.args.train_index_file)\n', (1220, 1258), False, 'import os\n'), ((1847, 1862), 'model.AE', 'AE', (['self.config'], {}), '(self.config)\n', (1849, 1862), False, 'from model import AE\n'), ((4052, 4137), 'os.path.join', 'os.path.join', (['self.args.store_model_path', "('iteration%d.pth.tar' % (iteration + 1))"], {}), "(self.args.store_model_path, 'iteration%d.pth.tar' % (iteration +\n 1))\n", (4064, 4137), False, 'import os\n'), ((4329, 4367), 'os.listdir', 'os.listdir', (['self.args.store_model_path'], {}), '(self.args.store_model_path)\n', (4339, 4367), False, 'import os\n'), ((4400, 4446), 'os.path.join', 'os.path.join', (['self.args.store_model_path', 'dirs'], {}), '(self.args.store_model_path, dirs)\n', (4412, 4446), False, 'import os\n'), ((4523, 4546), 're.findall', 're.findall', (['"""\\\\d+"""', 'dir'], {}), "('\\\\d+', dir)\n", (4533, 4546), False, 'import re\n'), ((4835, 4846), 'time.time', 'time.time', ([], {}), '()\n', (4844, 4846), False, 'import time\n'), ((4768, 4791), 'shutil.rmtree', 'shutil.rmtree', (['dir_name'], {}), '(dir_name)\n', (4781, 4791), False, 'import shutil\n'), ((2445, 2464), 'oneflow.exp', 'flow.exp', (['log_sigma'], {}), '(log_sigma)\n', (2453, 2464), True, 'import oneflow as flow\n'), ((2467, 2483), 'oneflow.mul', 'flow.mul', (['mu', 'mu'], {}), '(mu, mu)\n', (2475, 2483), True, 'import oneflow as flow\n')]
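# --- Editor's note (illustrative, hypothetical helper reproducing the KL
# annealing schedule used in Solver.train() above): the KL weight ramps
# linearly from lambda_kl / annealing_iters up to lambda_kl over the first
# annealing_iters steps, then stays flat.
def kl_weight(iteration, lambda_kl, annealing_iters):
    if iteration >= annealing_iters:
        return lambda_kl
    return lambda_kl * (iteration + 1) / annealing_iters


assert kl_weight(0, 1.0, 100) == 0.01
assert kl_weight(99, 1.0, 100) == 1.0
assert kl_weight(500, 1.0, 100) == 1.0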
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import re import os import unittest import numpy as np import oneflow import oneflow as flow import oneflow.framework.graph_build_util as graph_build_util import oneflow.unittest @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") @flow.unittest.skip_unless_1n1d() class TestGraphActivationCheckpoint(flow.unittest.TestCase): def test_activation_checkpoint(test_case): loss_fn = flow.nn.MSELoss(reduction="sum") model = flow.nn.Sequential(flow.nn.Linear(3, 4), flow.nn.Linear(4, 4)) model1 = flow.nn.Sequential(flow.nn.Linear(4, 1), flow.nn.Flatten(0, 1)) class SubModule0(flow.nn.Module): def __init__(self): super().__init__() self.model = model def forward(self, x): scope = oneflow.current_scope() scope_proto = graph_build_util.scope_to_proto(scope) ck_bool = scope_proto.attr_name2attr_value["checkpointing"].at_bool test_case.assertEqual(ck_bool, True) out = self.model(x) return out class SubModule1(flow.nn.Module): def __init__(self): super().__init__() self.model = model1 def forward(self, x): scope = oneflow.current_scope() scope_proto = graph_build_util.scope_to_proto(scope) ck_bool = scope_proto.attr_name2attr_value["checkpointing"].at_bool test_case.assertEqual(ck_bool, True) out = self.model(x) return out optimizer = flow.optim.SGD(model.parameters(), lr=1e-6) class LinearTrainGraph(flow.nn.Graph): def __init__(self): super().__init__() self.model = SubModule0() self.model1 = SubModule1() self.loss_fn = loss_fn # Add an optimizer self.add_optimizer(optimizer) self.model.config.activation_checkpointing = True self.model1.config.activation_checkpointing = True def build(self, x, y): y_pred = self.model(x) y_pred = self.model1(y_pred) loss = self.loss_fn(y_pred, y) loss.backward() return loss linear_graph = LinearTrainGraph() x = flow.randn(10, 3) y = flow.randn(10) linear_graph._compile(x, y) graph_proto = linear_graph._full_graph_proto for op in graph_proto.net.op: # Check flatten gradient operator take checkpoiting as input if re.search("flatten.*grad", op.name, re.I) is not None: find_check_point = False for value in op.user_conf.input.values(): if ( re.search( "OneFlow-System-Checkpointing-Fake-Fw-Op", str(value), re.I ) is not None ): find_check_point = True print(value) test_case.assertTrue(find_check_point) # Check having insert identity op and first fake op of a segment has indentity grad as it's ctrl in op if ( re.search( "OneFlow-System-Checkpointing-Fake-Fw-Op_model.model.0-matmul*", op.name, re.I, ) is not None ): find_ctrl = False for name in op.ctrl_in_op_name: if re.search("identity-.*_grad", str(name), re.I) is not None: find_ctrl = True print(name) test_case.assertTrue(find_ctrl) if __name__ == "__main__": unittest.main()
[ "oneflow.nn.Linear", "oneflow.nn.Flatten", "oneflow.current_scope", "oneflow.randn", "oneflow.framework.graph_build_util.scope_to_proto", "oneflow.unittest.skip_unless_1n1d", "oneflow.nn.MSELoss" ]
[((860, 892), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (890, 892), True, 'import oneflow as flow\n'), ((800, 834), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (809, 834), False, 'import os\n'), ((4478, 4493), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4491, 4493), False, 'import unittest\n'), ((1019, 1051), 'oneflow.nn.MSELoss', 'flow.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1034, 1051), True, 'import oneflow as flow\n'), ((3007, 3024), 'oneflow.randn', 'flow.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (3017, 3024), True, 'import oneflow as flow\n'), ((3037, 3051), 'oneflow.randn', 'flow.randn', (['(10)'], {}), '(10)\n', (3047, 3051), True, 'import oneflow as flow\n'), ((1087, 1107), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(3)', '(4)'], {}), '(3, 4)\n', (1101, 1107), True, 'import oneflow as flow\n'), ((1109, 1129), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(4)', '(4)'], {}), '(4, 4)\n', (1123, 1129), True, 'import oneflow as flow\n'), ((1167, 1187), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(4)', '(1)'], {}), '(4, 1)\n', (1181, 1187), True, 'import oneflow as flow\n'), ((1189, 1210), 'oneflow.nn.Flatten', 'flow.nn.Flatten', (['(0)', '(1)'], {}), '(0, 1)\n', (1204, 1210), True, 'import oneflow as flow\n'), ((1416, 1439), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (1437, 1439), False, 'import oneflow\n'), ((1470, 1508), 'oneflow.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (1501, 1508), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((1914, 1937), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (1935, 1937), False, 'import oneflow\n'), ((1968, 2006), 'oneflow.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (1999, 2006), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((3268, 3309), 're.search', 're.search', (['"""flatten.*grad"""', 'op.name', 're.I'], {}), "('flatten.*grad', op.name, re.I)\n", (3277, 3309), False, 'import re\n'), ((3943, 4036), 're.search', 're.search', (['"""OneFlow-System-Checkpointing-Fake-Fw-Op_model.model.0-matmul*"""', 'op.name', 're.I'], {}), "('OneFlow-System-Checkpointing-Fake-Fw-Op_model.model.0-matmul*',\n op.name, re.I)\n", (3952, 4036), False, 'import re\n')]
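# --- Editor's sketch (hypothetical; TinyGraph and its shapes are illustrative,
# not part of the test above). It isolates the pattern the test exercises:
# activation checkpointing is switched on per graph block via
# `<block>.config.activation_checkpointing = True`, so that block's forward
# activations are recomputed during backward instead of being stored.
import oneflow as flow


class TinyGraph(flow.nn.Graph):
    def __init__(self, module, optimizer):
        super().__init__()
        self.m = module
        self.loss_fn = flow.nn.MSELoss(reduction="sum")
        self.add_optimizer(optimizer)
        # Trade compute for memory on this block only.
        self.m.config.activation_checkpointing = True

    def build(self, x, y):
        loss = self.loss_fn(self.m(x), y)
        loss.backward()
        return loss


# e.g. m = flow.nn.Linear(3, 3)
#      graph = TinyGraph(m, flow.optim.SGD(m.parameters(), lr=0.1))
#      graph(flow.randn(4, 3), flow.randn(4, 3))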
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import os import oneflow as flow import oneflow.core.operator.op_conf_pb2 as op_conf_util import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util import oneflow.python.framework.dtype as dtype_util import oneflow.python.framework.distribute as distribute_util import oneflow.python.framework.id_util as id_util import oneflow.python.framework.remote_blob as remote_blob_util from oneflow.python.oneflow_export import oneflow_export import oneflow_api from typing import Optional, Union @oneflow_export("one_hot") def one_hot( indices: oneflow_api.BlobDesc, depth: int, on_value: Union[int, float] = 1, off_value: Union[int, float] = 0, axis: int = -1, dtype: Optional[dtype_util.dtype] = None, name: Optional[str] = None, ) -> oneflow_api.BlobDesc: """This operator generates a onehot Blob from input Blob. If input Blob's rank is `N`, the corresponding onehot Blob's rank is `N+1`. The new axis is generated on the specified dimension according to the parameter `axis`. The locations represented by `indices` take value `on_value`, while other locations take `off_value` Args: indices (oneflow_api.BlobDesc): The input Blob. depth (int): The length of onehot Blob. on_value (Union[int, float], optional): The fill value when `indices[i] == i`. Defaults to 1. off_value (Union[int, float], optional): The fill value when `indice[i] != i`. Defaults to 0. axis (int, optional): The specified dimension that the new axis is generated on. Defaults to -1. dtype (Optional[dtype_util.dtype], optional): The output data type, it can be "oneflow.int32", "oneflow.int64", "oneflow.float", "oneflow.double". Defaults to None. name (Optional[str], optional): The name for the operation. Defaults to None. Note: The data type of input blob should be `int32` or `int64` For example: Example 1: .. code-block:: python import oneflow as flow import oneflow.typing as tp import numpy as np @flow.global_function() def onehot_Job(x: tp.Numpy.Placeholder((4, ), dtype=flow.int32) ) -> tp.Numpy: return flow.one_hot(indices=x, depth=5, axis=-1, dtype=flow.int32) x = np.array([0, 3, 1, 2]).astype(np.int32) out = onehot_Job(x) # out [[1 0 0 0 0] # [0 0 0 1 0] # [0 1 0 0 0] # [0 0 1 0 0]] Example 2: .. code-block:: python import oneflow as flow import oneflow.typing as tp import numpy as np @flow.global_function() def onehot_Job(x: tp.Numpy.Placeholder((4, ), dtype=flow.int32) ) -> tp.Numpy: return flow.one_hot(indices=x, depth=5, axis=0, dtype=flow.int32) x = np.array([0, 3, 1, 2]).astype(np.int32) out = onehot_Job(x) # out [[1 0 0 0] # [0 0 1 0] # [0 0 0 1] # [0 1 0 0] # [0 0 0 0]] Returns: oneflow_api.BlobDesc: [description] """ out_ndims = len(indices.shape) + 1 if axis < 0: axis += out_ndims assert axis >= 0 and axis < out_ndims, ValueError( "Expected axis to between [%d, %d). 
But received: %d " % (-out_ndims, out_ndims, axis) ) out = ( flow.user_op_builder(name if name is not None else id_util.UniqueStr("OneHot_")) .Op("one_hot") .Input("indices", [indices]) .Attr("depth", int(depth)) .Attr("floating_on_value", float(on_value)) .Attr("integer_on_value", int(on_value)) .Attr("floating_off_value", float(off_value)) .Attr("integer_off_value", int(off_value)) .Attr("dtype", dtype) .Output("out") .Build() .InferAndTryRun() .RemoteBlobList()[0] ) if axis != (out_ndims - 1): dim_list = list(range(0, out_ndims)) dim_list.insert(axis, out_ndims - 1) dim_list.pop() return flow.transpose(out, dim_list) else: return out
[ "oneflow.python.framework.id_util.UniqueStr", "oneflow.transpose", "oneflow.python.oneflow_export.oneflow_export" ]
[((1137, 1162), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""one_hot"""'], {}), "('one_hot')\n", (1151, 1162), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4856, 4885), 'oneflow.transpose', 'flow.transpose', (['out', 'dim_list'], {}), '(out, dim_list)\n', (4870, 4885), True, 'import oneflow as flow\n'), ((4234, 4262), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OneHot_"""'], {}), "('OneHot_')\n", (4251, 4262), True, 'import oneflow.python.framework.id_util as id_util\n')]
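# --- Editor's note (hypothetical numpy reference implementation of the
# semantics documented above, for the default axis=-1; not the operator's
# actual kernel).
import numpy as np


def np_one_hot(indices, depth, on_value=1, off_value=0, dtype=np.int32):
    out = np.full(indices.shape + (depth,), off_value, dtype=dtype)
    # Put on_value at position indices[i] along the new last axis.
    np.put_along_axis(out, indices[..., None], on_value, axis=-1)
    return out


# Reproduces Example 1 from the docstring above.
assert (np_one_hot(np.array([0, 3, 1, 2]), 5) ==
        np.array([[1, 0, 0, 0, 0],
                  [0, 0, 0, 1, 0],
                  [0, 1, 0, 0, 0],
                  [0, 0, 1, 0, 0]])).all()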
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import copy import traceback import oneflow.core.common.data_type_pb2 as data_type_util import oneflow.python.framework.distribute as distribute_util import oneflow.python.lib.core.traceinfo as traceinfo class BlobDesc(object): def __init__(self, lbi, distribute=distribute_util.auto(), disable_boxing=None): self.lbi_ = lbi self.lbn_ = lbi.op_name + "/" + lbi.blob_name self.distribute_ = distribute self.disable_boxing_ = disable_boxing @property def lbi(self): return self.lbi_ @property def logical_blob_name(self): return self.lbn_ @property def op_name(self): return self.lbi_.op_name @property def blob_name(self): return self.lbi_.blob_name @property def shape(self): raise NotImplementedError @property def dtype(self): raise NotImplementedError @property def batch_axis(self): raise NotImplementedError def has_batch_axis(self): batch_axis = self.batch_axis ret = batch_axis is not None if ret: assert type(batch_axis) is int return ret @property def is_dynamic(self): raise NotImplementedError @property def disable_boxing(self): return self.disable_boxing_ @property def is_tensor_list(self): raise NotImplementedError @property def parallel_conf(self): raise NotImplementedError def with_boxing_disabled(self, val=True): ret = self.Clone() ret.disable_boxing_ = val return ret def with_distribute(self, distribute): ret = self.Clone() ret.distribute_ = distribute return ret def with_split_distribute(self, axis): return self.with_distribute(distribute_util.split(axis)) def with_broadcast_distribute(self): return self.with_distribute(distribute_util.broadcast()) @property def distribute(self): distribute_util.assert_is_valid_distribute(self.distribute_) return self.distribute_ @property def unique_name(self): return self.lbn_ + self._Distribute2Str() + self._DisableBoxing2Str() def Clone(self): return copy.deepcopy(self) def _Distribute2Str(self): if type(self.distribute_) is distribute_util.AutoDistribute: return "" elif type(self.distribute_) is distribute_util.SplitDistribute: return ":S" + str(self.distribute_.axis) elif type(self.distribute_) is distribute_util.BroadcastDistribute: return ":B" else: raise NotImplementedError def _DisableBoxing2Str(self): if self.disable_boxing_ is None: return "" if self.disable_boxing_ is False: return "|0" if self.disable_boxing_ is True: return "|1" raise NotImplementedError
[ "oneflow.python.framework.distribute.assert_is_valid_distribute", "oneflow.python.framework.distribute.auto", "oneflow.python.framework.distribute.split", "oneflow.python.framework.distribute.broadcast" ]
[((900, 922), 'oneflow.python.framework.distribute.auto', 'distribute_util.auto', ([], {}), '()\n', (920, 922), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((2616, 2676), 'oneflow.python.framework.distribute.assert_is_valid_distribute', 'distribute_util.assert_is_valid_distribute', (['self.distribute_'], {}), '(self.distribute_)\n', (2658, 2676), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((2866, 2885), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (2879, 2885), False, 'import copy\n'), ((2431, 2458), 'oneflow.python.framework.distribute.split', 'distribute_util.split', (['axis'], {}), '(axis)\n', (2452, 2458), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((2538, 2565), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (2563, 2565), True, 'import oneflow.python.framework.distribute as distribute_util\n')]
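# --- Editor's note (illustrative, hypothetical standalone mimic of
# BlobDesc.unique_name above): the logical blob name gains a distribute suffix
# (":S<axis>" for split, ":B" for broadcast, nothing for auto) and a boxing
# suffix ("|1"/"|0" when boxing is explicitly disabled/enabled).
def unique_name(lbn, distribute_suffix="", disable_boxing=None):
    suffix = distribute_suffix  # e.g. "", ":S0", or ":B"
    if disable_boxing is not None:
        suffix += "|1" if disable_boxing else "|0"
    return lbn + suffix


assert unique_name("op/out", ":S0", True) == "op/out:S0|1"
assert unique_name("op/out") == "op/out"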
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import from typing import Optional, Sequence, Union from oneflow.compatible import single_client as flow from oneflow.compatible.single_client.python.framework import ( distribute as distribute_util, ) from oneflow.compatible.single_client.python.framework import hob as hob from oneflow.compatible.single_client.python.framework import id_util as id_util from oneflow.compatible.single_client.python.framework import ( remote_blob as remote_blob_util, ) from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if from oneflow.compatible.single_client.python.ops import ( user_op_builder as user_op_builder, ) from oneflow.compatible.single_client.python.oneflow_export import oneflow_export import oneflow._oneflow_internal @oneflow_export("math.two_stage_reduce_max") def api_two_stage_reduce_max( x: oneflow._oneflow_internal.BlobDesc, axis: Optional[Union[int, Sequence[int]]] = None, keepdims: bool = False, name: Optional[str] = None, ) -> oneflow._oneflow_internal.BlobDesc: func = enable_if.unique([two_stage_reduce_max]) return func(x, axis=axis, keepdims=keepdims, name=name) @enable_if.condition(hob.in_global_mode) def two_stage_reduce_max(x, axis=None, keepdims=False, name=None): name = name if name is not None else id_util.UniqueStr("ReduceMax_") return two_stage_reduce(x, axis, keepdims, "reduce_max", name) @oneflow_export("math.two_stage_reduce_min") def api_two_stage_reduce_min( x: oneflow._oneflow_internal.BlobDesc, axis: Optional[Union[int, Sequence[int]]] = None, keepdims: bool = False, name: Optional[str] = None, ) -> oneflow._oneflow_internal.BlobDesc: func = enable_if.unique([two_stage_reduce_min]) return func(x, axis=axis, keepdims=keepdims, name=name) @enable_if.condition(hob.in_global_mode) def two_stage_reduce_min(x, axis=None, keepdims=False, name=None): name = name if name is not None else id_util.UniqueStr("ReduceMin_") return two_stage_reduce(x, axis, keepdims, "reduce_min", name) def two_stage_reduce(x, axis=None, keepdims=False, op_type_name=None, name=None): assert check_x_dictribute(x, axis) axis = _check_axis(axis, x.shape) device_stage_out_list = [] device_stage_count_list = [] distribute_axis = x.distribute.axis x_list = flow.advanced.distribute_split(x, axis=distribute_axis) parallel_desc_symbol = flow.current_scope().device_parallel_desc_symbol device_tag = parallel_desc_symbol.device_tag parallel_id = 0 for ( machine_id, device_ids, ) in parallel_desc_symbol.machine_id2device_id_list.items(): for device_id in device_ids: with flow.scope.placement( device_tag, "@" + str(machine_id) + ":" + str(device_id) ): device_stage_out, device_stage_count = reduce_device_stage( x_list[parallel_id], axis, op_type_name + "_device_stage", name + "_device_stage" + str(parallel_id), ) device_stage_out_list.append(device_stage_out) device_stage_count_list.append(device_stage_count) parallel_id += 1 device_stage_out = flow.advanced.distribute_concat( 
device_stage_out_list, axis=distribute_axis ) device_stage_count = flow.advanced.distribute_concat( device_stage_count_list, axis=distribute_axis ) device_stage_out = device_stage_out.with_distribute(flow.distribute.broadcast()) device_stage_count = device_stage_count.with_distribute(flow.distribute.broadcast()) out = reduce_global_stage( device_stage_out, device_stage_count, axis, keepdims, op_type_name + "_global_stage", name + "_global_stage", ) return out def reduce_device_stage(x, axis, op_name, name): out, mask, count = ( flow.user_op_builder(name) .Op(op_name) .Input("in", [x]) .Output("out") .Output("mask") .Output("count") .Attr("axis", axis) .Build() .InferAndTryRun() .RemoteBlobList() ) return out, count def reduce_global_stage(x, device_count, axis, keepdims, op_name, name): out, mask = ( flow.user_op_builder(name) .Op(op_name) .Input("in", [x]) .Input("device_count", [device_count]) .Output("out") .Output("mask") .Attr("axis", axis) .Attr("keepdims", keepdims) .Build() .InferAndTryRun() .RemoteBlobList() ) return out def _check_axis(axis, shape): if axis is None: axis = list(range(len(shape))) if isinstance(axis, int): axis = [axis] assert isinstance(axis, (list, tuple)), "Invalid axis {}".format(axis) for x in axis: if x < 0: x += len(shape) assert x >= 0 and x < len(shape), "Invalid axis {}".format(axis) return axis def check_x_dictribute(x, axis): for i in axis: if x.distribute is oneflow._oneflow_internal.distribute.split(i): return True return False
[ "oneflow.compatible.single_client.python.lib.core.enable_if.unique", "oneflow.compatible.single_client.python.lib.core.enable_if.condition", "oneflow.compatible.single_client.advanced.distribute_split", "oneflow.compatible.single_client.python.framework.id_util.UniqueStr", "oneflow.compatible.single_client.advanced.distribute_concat", "oneflow.compatible.single_client.user_op_builder", "oneflow.compatible.single_client.current_scope", "oneflow.compatible.single_client.distribute.broadcast", "oneflow.compatible.single_client.python.oneflow_export.oneflow_export" ]
[((1389, 1432), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.two_stage_reduce_max"""'], {}), "('math.two_stage_reduce_max')\n", (1403, 1432), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export\n'), ((1776, 1815), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['hob.in_global_mode'], {}), '(hob.in_global_mode)\n', (1795, 1815), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((2026, 2069), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.two_stage_reduce_min"""'], {}), "('math.two_stage_reduce_min')\n", (2040, 2069), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export\n'), ((2413, 2452), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['hob.in_global_mode'], {}), '(hob.in_global_mode)\n', (2432, 2452), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((1672, 1712), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[two_stage_reduce_max]'], {}), '([two_stage_reduce_max])\n', (1688, 1712), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((2309, 2349), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[two_stage_reduce_min]'], {}), '([two_stage_reduce_min])\n', (2325, 2349), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((2940, 2995), 'oneflow.compatible.single_client.advanced.distribute_split', 'flow.advanced.distribute_split', (['x'], {'axis': 'distribute_axis'}), '(x, axis=distribute_axis)\n', (2970, 2995), True, 'from oneflow.compatible import single_client as flow\n'), ((3882, 3958), 'oneflow.compatible.single_client.advanced.distribute_concat', 'flow.advanced.distribute_concat', (['device_stage_out_list'], {'axis': 'distribute_axis'}), '(device_stage_out_list, axis=distribute_axis)\n', (3913, 3958), True, 'from oneflow.compatible import single_client as flow\n'), ((3998, 4076), 'oneflow.compatible.single_client.advanced.distribute_concat', 'flow.advanced.distribute_concat', (['device_stage_count_list'], {'axis': 'distribute_axis'}), '(device_stage_count_list, axis=distribute_axis)\n', (4029, 4076), True, 'from oneflow.compatible import single_client as flow\n'), ((1924, 1955), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ReduceMax_"""'], {}), "('ReduceMax_')\n", (1941, 1955), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((2561, 2592), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ReduceMin_"""'], {}), "('ReduceMin_')\n", (2578, 2592), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((3023, 3043), 'oneflow.compatible.single_client.current_scope', 'flow.current_scope', ([], {}), '()\n', (3041, 3043), True, 'from oneflow.compatible import single_client as flow\n'), ((4148, 4175), 'oneflow.compatible.single_client.distribute.broadcast', 'flow.distribute.broadcast', ([], {}), '()\n', (4173, 4175), True, 'from oneflow.compatible import single_client as flow\n'), ((4237, 4264), 'oneflow.compatible.single_client.distribute.broadcast', 
'flow.distribute.broadcast', ([], {}), '()\n', (4262, 4264), True, 'from oneflow.compatible import single_client as flow\n'), ((4561, 4587), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (4581, 4587), True, 'from oneflow.compatible import single_client as flow\n'), ((4933, 4959), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (4953, 4959), True, 'from oneflow.compatible import single_client as flow\n')]
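Each extract_api entry above records, for one call site: the character span of the full call, the resolved API path, the alias used in the source, the extracted positional and keyword arguments, the rendered argument text with its own span, whether the name is bound through an aliased import, and the import statement itself. As a rough illustration only (not the actual extractor behind these records), call sites of this kind can be located with Python's standard ast module:

import ast

def dotted_name(node):
    # Rebuild a dotted call target such as "flow.advanced.distribute_split"
    # from nested Attribute nodes.
    if isinstance(node, ast.Name):
        return node.id
    if isinstance(node, ast.Attribute):
        base = dotted_name(node.value)
        return f"{base}.{node.attr}" if base else None
    return None

def find_calls(source, prefix="flow."):
    # Yield (line number, dotted call name) for calls whose target matches prefix.
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted_name(node.func)
            if name and name.startswith(prefix):
                yield node.lineno, name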
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np import math import oneflow.experimental as flow from test_util import GenArgList def _nd_tuple_to_dhw(nd_tuple, dim, prefix=1, dhw_offset=0): assert dim <= 3 assert dim == len(nd_tuple) - dhw_offset nd_tuple = list(nd_tuple) dhw_tuple = nd_tuple[:dhw_offset] dhw_tuple.extend([prefix for _ in range(3 - dim)]) dhw_tuple.extend(nd_tuple[dhw_offset:]) return tuple(dhw_tuple) def _dhw_tuple_to_nd(dhw_tuple, dim, prefix=1, dhw_offset=0): assert dim <= 3 assert 3 == len(dhw_tuple) - dhw_offset dhw_tuple = list(dhw_tuple) nd_tuple = dhw_tuple[:dhw_offset] nd_offset = dhw_offset + 3 - dim for i in dhw_tuple[dhw_offset:nd_offset]: assert prefix == i nd_tuple.extend(dhw_tuple[nd_offset:]) return tuple(nd_tuple) class MaxPoolNumpy: def __init__(self, dim=2, kernel_size=(2, 2), stride=(2, 2), padding=(0, 0)): self.dim = dim self.stride = _nd_tuple_to_dhw(stride, dim) self.padding = _nd_tuple_to_dhw(padding, dim, prefix=0) self.kernel_size = _nd_tuple_to_dhw(kernel_size, dim) self.w_depth = self.kernel_size[0] self.w_height = self.kernel_size[1] self.w_width = self.kernel_size[2] self.min_val = np.finfo(np.float64).min def __call__(self, x): self.x_shape = x.shape x_shape_5d = _nd_tuple_to_dhw(self.x_shape, self.dim, prefix=1, dhw_offset=2) x = x.reshape(x_shape_5d) self.in_batch = np.shape(x)[0] self.in_channel = np.shape(x)[1] self.in_depth = np.shape(x)[2] self.in_height = np.shape(x)[3] self.in_width = np.shape(x)[4] pad_x = np.pad( x, ( (0, 0), (0, 0), (self.padding[0], self.padding[0]), (self.padding[1], self.padding[1]), (self.padding[2], self.padding[2]), ), "constant", constant_values=(self.min_val, self.min_val), ) self.pad_x = pad_x self.pad_shape = pad_x.shape self.out_depth = int((self.in_depth - self.w_depth) / self.stride[0]) + 1 self.out_height = int((self.in_height - self.w_height) / self.stride[1]) + 1 self.out_width = int((self.in_width - self.w_width) / self.stride[2]) + 1 self.pad_out_depth = np.uint16( math.ceil((self.pad_shape[2] - self.w_depth + 1) / self.stride[0]) ) self.pad_out_height = np.uint16( math.ceil((self.pad_shape[3] - self.w_height + 1) / self.stride[1]) ) self.pad_out_width = np.uint16( math.ceil((self.pad_shape[4] - self.w_width + 1) / self.stride[2]) ) out = np.zeros( ( self.in_batch, self.in_channel, self.pad_out_depth, self.pad_out_height, self.pad_out_width, ) ) self.arg_max = np.zeros_like(out, dtype=np.int32) for n in range(self.in_batch): for c in range(self.in_channel): for i in range(self.pad_out_depth): for j in range(self.pad_out_height): for k in range(self.pad_out_width): start_i = i * self.stride[0] start_j = j * self.stride[1] start_k = k * self.stride[2] end_i = start_i + self.w_depth end_j = start_j + self.w_height end_k = start_k + self.w_width out[n, c, i, j, k] = np.max( pad_x[n, c, start_i:end_i, start_j:end_j, start_k:end_k] ) self.arg_max[n, c, i, j, k] = np.argmax( pad_x[n, c, start_i:end_i, 
start_j:end_j, start_k:end_k] ) self.out_shape_5d = out.shape out_shape = _dhw_tuple_to_nd(out.shape, self.dim, dhw_offset=2) out = out.reshape(out_shape) return out def backward(self, d_loss): d_loss = d_loss.reshape(self.out_shape_5d) dx = np.zeros_like(self.pad_x) for n in range(self.in_batch): for c in range(self.in_channel): for i in range(self.pad_out_depth): for j in range(self.pad_out_height): for k in range(self.pad_out_width): start_i = i * self.stride[0] start_j = j * self.stride[1] start_k = k * self.stride[2] end_i = start_i + self.w_depth end_j = start_j + self.w_height end_k = start_k + self.w_width index = np.unravel_index( self.arg_max[n, c, i, j, k], self.kernel_size ) dx[n, c, start_i:end_i, start_j:end_j, start_k:end_k][ index ] += d_loss[n, c, i, j, k] dx = dx[ :, :, self.padding[0] : self.pad_shape[2] - self.padding[0], self.padding[1] : self.pad_shape[3] - self.padding[1], self.padding[2] : self.pad_shape[4] - self.padding[2], ] dx = dx.reshape(self.x_shape) return dx def _test_maxpool1d_impl(test_case, device): input_arr = np.array( [ [ [-0.89042996, 2.33971243, -0.86660827, 0.80398747], [-1.46769364, -0.78125064, 1.50086563, -0.76278226], [1.31984534, 0.20741192, -0.86507054, -0.40776015], [-0.89910823, 0.44932938, 1.49148118, -0.22036761], ], [ [-0.5452334, -0.10255169, -1.42035108, 0.73922913], [-0.03192764, 0.69341935, 0.96263152, -1.52070843], [0.02058239, 1.504032, 1.84423001, -0.0130596], [2.20517719, 0.38449598, 0.85677771, 0.60425179], ], [ [-1.64366213, 0.51370298, -0.21754866, -0.05085382], [1.17065374, 1.13857674, -1.13070507, 0.44353707], [-1.30783846, -0.48031445, 0.41807536, -2.13778887], [0.08259005, 0.5798125, 0.03024696, 1.96100924], ], ] ) kernel_size, stride, padding = (3,), (1,), (1,) output = np.array( [ [ [2.33971243, 2.33971243, 2.33971243, 0.80398747], [-0.78125064, 1.50086563, 1.50086563, 1.50086563], [1.31984534, 1.31984534, 0.20741192, -0.40776015], [0.44932938, 1.49148118, 1.49148118, 1.49148118], ], [ [-0.10255169, -0.10255169, 0.73922913, 0.73922913], [0.69341935, 0.96263152, 0.96263152, 0.96263152], [1.504032, 1.84423001, 1.84423001, 1.84423001], [2.20517719, 2.20517719, 0.85677771, 0.85677771], ], [ [0.51370298, 0.51370298, 0.51370298, -0.05085382], [1.17065374, 1.17065374, 1.13857674, 0.44353707], [-0.48031445, 0.41807536, 0.41807536, 0.41807536], [0.5798125, 0.5798125, 1.96100924, 1.96100924], ], ] ) output_indice = np.array( [ [[1, 1, 1, 3], [1, 2, 2, 2], [0, 0, 1, 3], [1, 2, 2, 2]], [[1, 1, 3, 3], [1, 2, 2, 2], [1, 2, 2, 2], [0, 0, 2, 2]], [[1, 1, 1, 3], [0, 0, 1, 3], [1, 2, 2, 2], [1, 1, 3, 3]], ] ) grad = np.array( [ [ [0.0, 3.0, 0.0, 1.0], [0.0, 1.0, 3.0, 0.0], [2.0, 1.0, 0.0, 1.0], [0.0, 1.0, 3.0, 0.0], ], [ [0.0, 2.0, 0.0, 2.0], [0.0, 1.0, 3.0, 0.0], [0.0, 1.0, 3.0, 0.0], [2.0, 0.0, 2.0, 0.0], ], [ [0.0, 3.0, 0.0, 1.0], [2.0, 1.0, 0.0, 1.0], [0.0, 1.0, 3.0, 0.0], [0.0, 2.0, 0.0, 2.0], ], ] ) m = flow.nn.MaxPool1d( kernel_size=kernel_size, stride=stride, padding=padding, return_indices=True ) m.to(flow.device(device)) x = flow.Tensor(input_arr, device=flow.device(device), requires_grad=True) of_output, of_indice = m(x) y = of_output.sum() y.backward() test_case.assertTrue(np.allclose(x.grad.numpy(), grad, 1e-4, 1e-4)) test_case.assertTrue(np.allclose(of_indice.numpy(), output_indice, 1e-4, 1e-4)) test_case.assertTrue(np.allclose(of_output.numpy(), output, 1e-4, 1e-4)) def _test_maxpool1d_zero_padding(test_case, device): arr = np.arange(1000).reshape(4, 5, 50).astype(np.float) input = flow.tensor(arr, dtype=flow.float32, 
device=flow.device(device)) m1 = flow.nn.MaxPool1d(kernel_size=3, stride=3, padding=0) of_out = m1(input) m2 = MaxPoolNumpy(2, kernel_size=(3, 1), stride=(3, 1), padding=(0, 0)) np_out = m2(arr.reshape(4, 5, 50, 1)) np_out = np.squeeze(np_out, axis=3) test_case.assertTrue(np.allclose(np_out, of_out.numpy(), 1e-4, 1e-4)) def _test_maxpool2d(test_case, device): dim = 2 input_arr = np.random.randn(2, 3, 4, 5) kernel_size, stride, padding = (3, 3), (1, 1), (1, 1) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d( kernel_size=kernel_size, stride=stride, padding=padding, return_indices=True ) m.to(flow.device(device)) x = flow.Tensor(input_arr, device=flow.device(device)) output, indice = m(x) test_case.assertTrue(indice.shape == x.shape) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) def _test_maxpool2d_ceil_mode(test_case, device): dim = 2 input_arr = np.array( [ [ [ [-0.89042996, 2.33971243, -0.86660827, 0.80398747], [-1.46769364, -0.78125064, 1.50086563, -0.76278226], [1.31984534, 0.20741192, -0.86507054, -0.40776015], [-0.89910823, 0.44932938, 1.49148118, -0.22036761], ], [ [-0.5452334, -0.10255169, -1.42035108, 0.73922913], [-0.03192764, 0.69341935, 0.96263152, -1.52070843], [0.02058239, 1.504032, 1.84423001, -0.0130596], [2.20517719, 0.38449598, 0.85677771, 0.60425179], ], [ [-1.64366213, 0.51370298, -0.21754866, -0.05085382], [1.17065374, 1.13857674, -1.13070507, 0.44353707], [-1.30783846, -0.48031445, 0.41807536, -2.13778887], [0.08259005, 0.5798125, 0.03024696, 1.96100924], ], ], [ [ [0.45173843, -0.34680027, -0.99754943, 0.18539502], [-0.68451047, -0.03217399, 0.44705642, -0.39016231], [-0.18062337, 1.82099303, -0.19113869, 0.85298683], [0.14080452, 0.15306701, -1.02466827, -0.34480665], ], [ [-0.21048489, 0.20933038, -0.09206508, -1.80402519], [-0.52028985, 0.01140166, -1.13452858, 0.96648332], [0.26454393, 0.48343972, -1.84055509, -0.01256443], [0.31024029, 0.11983007, 0.98806488, 0.93557438], ], [ [0.39152445, 0.672159, 0.71289289, -0.68072016], [0.33711062, -1.78106242, 0.34545201, -1.62029359], [0.47343899, -2.3433269, -0.44517497, 0.09004267], [0.26310742, -1.53121271, 0.65028836, 1.3669488], ], ], ] ) ceil_mode_out = np.array( [ [ [ [2.33971243, 2.33971243, 0.80398747], [1.31984534, 1.50086563, -0.22036761], [0.44932938, 1.49148118, -0.22036761], ], [ [0.69341935, 0.96263152, 0.73922913], [2.20517719, 1.84423001, 0.60425179], [2.20517719, 0.85677771, 0.60425179], ], [ [1.17065374, 1.13857674, 0.44353707], [1.17065374, 1.96100924, 1.96100924], [0.5798125, 1.96100924, 1.96100924], ], ], [ [ [0.45173843, 0.44705642, 0.18539502], [1.82099303, 1.82099303, 0.85298683], [0.15306701, 0.15306701, -0.34480665], ], [ [0.20933038, 0.96648332, 0.96648332], [0.48343972, 0.98806488, 0.96648332], [0.31024029, 0.98806488, 0.93557438], ], [ [0.672159, 0.71289289, -0.68072016], [0.47343899, 1.3669488, 1.3669488], [0.26310742, 1.3669488, 1.3669488], ], ], ] ) kernel_size, stride, padding = (3, 3), (2, 2), (1, 1) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m1 = flow.nn.MaxPool2d( kernel_size=kernel_size, stride=stride, padding=padding, ceil_mode=False ) m2 = flow.nn.MaxPool2d( kernel_size=kernel_size, stride=stride, padding=padding, ceil_mode=True ) m1.to(flow.device(device)) m2.to(flow.device(device)) x = flow.Tensor(input_arr, device=flow.device(device)) output1 = m1(x) output2 = m2(x) test_case.assertTrue(np.allclose(numpy_output, output1.numpy(), 1e-4, 
1e-4)) test_case.assertTrue(np.allclose(ceil_mode_out, output2.numpy(), 1e-4, 1e-4)) def _test_maxpool2d_special_kernel_size(test_case, device): dim = 2 input_arr = np.random.randn(1, 1, 6, 6) kernel_size, stride, padding = (1, 1), (5, 5), (0, 0) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, device=flow.device(device)) output = m(x) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) def _test_maxpool2d_diff_kernel_stride(test_case, device): dim = 2 input_arr = np.random.randn(9, 7, 32, 20) kernel_size, stride, padding = (2, 4), (4, 5), (1, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, device=flow.device(device)) output = m(x) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) def _test_maxpool2d_negative_input(test_case, device): dim = 2 input_arr = -1.23456 * np.ones((1, 1, 1, 1), dtype=np.float32) kernel_size, stride, padding = (5, 5), (5, 5), (2, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, device=flow.device(device)) output = m(x) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) def _test_maxpool2d_backward(test_case, device): dim = 2 input_arr = np.random.randn(6, 4, 7, 9) kernel_size, stride, padding = (4, 4), (1, 1), (1, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device)) output = m(x) output = output.sum() output.backward() doutput = np.ones_like(numpy_output, dtype=np.float64) numpy_grad = m_numpy.backward(doutput) test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4)) def _test_maxpool2d_special_kernel_size_backward(test_case, device): dim = 2 input_arr = np.random.randn(1, 1, 6, 6) kernel_size, stride, padding = (1, 1), (5, 5), (0, 0) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device)) output = m(x) output = output.sum() output.backward() doutput = np.ones_like(numpy_output, dtype=np.float64) numpy_grad = m_numpy.backward(doutput) test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4)) def _test_maxpool2d_diff_kernel_stride_backward(test_case, device): dim = 2 input_arr = np.random.randn(9, 7, 32, 20) kernel_size, stride, padding = (2, 4), (4, 5), (1, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device)) output = m(x) output = output.sum() output.backward() doutput = np.ones_like(numpy_output, dtype=np.float64) numpy_grad = m_numpy.backward(doutput) 
test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4)) def _test_maxpool2d_negative_input_backward(test_case, device): dim = 2 input_arr = -1.23456 * np.ones((1, 1, 1, 1), dtype=np.float32) kernel_size, stride, padding = (5, 5), (5, 5), (2, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device)) output = m(x) output = output.sum() output.backward() doutput = np.ones_like(numpy_output, dtype=np.float64) numpy_grad = m_numpy.backward(doutput) test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4)) def _test_maxpool3d(test_case, device): dim = 3 input_arr = np.random.randn(2, 3, 7, 9, 13) kernel_size, stride, padding = (2, 3, 4), (2, 3, 4), (1, 1, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, device=flow.device(device)) output = m(x) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) def _test_maxpool3d_backward(test_case, device): dim = 3 input_arr = np.random.randn(6, 4, 8, 7, 9) kernel_size, stride, padding = (4, 4, 4), (1, 1, 1), (2, 1, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device)) output = m(x) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) output = output.sum() output.backward() doutput = np.ones_like(numpy_output, dtype=np.float64) numpy_grad = m_numpy.backward(doutput) test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4)) def _test_maxpool3d_special_kernel_size_backward(test_case, device): dim = 3 input_arr = np.random.randn(1, 1, 6, 6, 6) kernel_size, stride, padding = (1, 1, 1), (5, 5, 5), (0, 0, 0) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device)) output = m(x) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) output = output.sum() output.backward() doutput = np.ones_like(numpy_output, dtype=np.float64) numpy_grad = m_numpy.backward(doutput) test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4)) def _test_maxpool3d_diff_kernel_stride_backward(test_case, device): dim = 3 input_arr = np.random.randn(9, 7, 48, 32, 20) kernel_size, stride, padding = (6, 2, 4), (5, 4, 5), (3, 1, 2) m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding) numpy_output = m_numpy(input_arr) m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding) m.to(flow.device(device)) x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device)) output = m(x) test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4)) output = output.sum() output.backward() doutput = np.ones_like(numpy_output, dtype=np.float64) numpy_grad = m_numpy.backward(doutput) test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4)) def _test_maxpool3d_negative_input_backward(test_case, 
device):
    dim = 3
    input_arr = -1.23456 * np.ones((1, 1, 1, 1, 1), dtype=np.float32)
    kernel_size, stride, padding = (5, 5, 5), (5, 5, 5), (2, 2, 2)
    m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)
    numpy_output = m_numpy(input_arr)
    # Pass stride explicitly so the op configuration matches the numpy
    # reference; omitting it would rely on stride defaulting to kernel_size.
    m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding)
    m.to(flow.device(device))
    x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))
    output = m(x)
    test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))
    output = output.sum()
    output.backward()
    doutput = np.ones_like(numpy_output, dtype=np.float64)
    numpy_grad = m_numpy.backward(doutput)
    test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-4, 1e-4))


@flow.unittest.skip_unless_1n1d()
class TestPooling(flow.unittest.TestCase):
    def test_maxpool1d(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_maxpool1d_impl, _test_maxpool1d_zero_padding]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    def test_maxpool2d(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_maxpool2d,
            _test_maxpool2d_ceil_mode,
            _test_maxpool2d_special_kernel_size,
            _test_maxpool2d_diff_kernel_stride,
            _test_maxpool2d_negative_input,
            _test_maxpool2d_backward,
            _test_maxpool2d_special_kernel_size_backward,
            _test_maxpool2d_diff_kernel_stride_backward,
            _test_maxpool2d_negative_input_backward,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    def test_maxpool3d(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_maxpool3d,
            _test_maxpool3d_backward,
            _test_maxpool3d_special_kernel_size_backward,
            _test_maxpool3d_negative_input_backward,
            _test_maxpool3d_diff_kernel_stride_backward,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])


if __name__ == "__main__":
    unittest.main()
[ "oneflow.experimental.nn.MaxPool1d", "oneflow.experimental.device", "oneflow.experimental.nn.MaxPool3d", "oneflow.experimental.nn.MaxPool2d", "oneflow.experimental.unittest.skip_unless_1n1d" ]
[((23252, 23284), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (23282, 23284), True, 'import oneflow.experimental as flow\n'), ((6240, 6916), 'numpy.array', 'np.array', (['[[[-0.89042996, 2.33971243, -0.86660827, 0.80398747], [-1.46769364, -\n 0.78125064, 1.50086563, -0.76278226], [1.31984534, 0.20741192, -\n 0.86507054, -0.40776015], [-0.89910823, 0.44932938, 1.49148118, -\n 0.22036761]], [[-0.5452334, -0.10255169, -1.42035108, 0.73922913], [-\n 0.03192764, 0.69341935, 0.96263152, -1.52070843], [0.02058239, 1.504032,\n 1.84423001, -0.0130596], [2.20517719, 0.38449598, 0.85677771, \n 0.60425179]], [[-1.64366213, 0.51370298, -0.21754866, -0.05085382], [\n 1.17065374, 1.13857674, -1.13070507, 0.44353707], [-1.30783846, -\n 0.48031445, 0.41807536, -2.13778887], [0.08259005, 0.5798125, \n 0.03024696, 1.96100924]]]'], {}), '([[[-0.89042996, 2.33971243, -0.86660827, 0.80398747], [-1.46769364,\n -0.78125064, 1.50086563, -0.76278226], [1.31984534, 0.20741192, -\n 0.86507054, -0.40776015], [-0.89910823, 0.44932938, 1.49148118, -\n 0.22036761]], [[-0.5452334, -0.10255169, -1.42035108, 0.73922913], [-\n 0.03192764, 0.69341935, 0.96263152, -1.52070843], [0.02058239, 1.504032,\n 1.84423001, -0.0130596], [2.20517719, 0.38449598, 0.85677771, \n 0.60425179]], [[-1.64366213, 0.51370298, -0.21754866, -0.05085382], [\n 1.17065374, 1.13857674, -1.13070507, 0.44353707], [-1.30783846, -\n 0.48031445, 0.41807536, -2.13778887], [0.08259005, 0.5798125, \n 0.03024696, 1.96100924]]])\n', (6248, 6916), True, 'import numpy as np\n'), ((7238, 7899), 'numpy.array', 'np.array', (['[[[2.33971243, 2.33971243, 2.33971243, 0.80398747], [-0.78125064, \n 1.50086563, 1.50086563, 1.50086563], [1.31984534, 1.31984534, \n 0.20741192, -0.40776015], [0.44932938, 1.49148118, 1.49148118, \n 1.49148118]], [[-0.10255169, -0.10255169, 0.73922913, 0.73922913], [\n 0.69341935, 0.96263152, 0.96263152, 0.96263152], [1.504032, 1.84423001,\n 1.84423001, 1.84423001], [2.20517719, 2.20517719, 0.85677771, \n 0.85677771]], [[0.51370298, 0.51370298, 0.51370298, -0.05085382], [\n 1.17065374, 1.17065374, 1.13857674, 0.44353707], [-0.48031445, \n 0.41807536, 0.41807536, 0.41807536], [0.5798125, 0.5798125, 1.96100924,\n 1.96100924]]]'], {}), '([[[2.33971243, 2.33971243, 2.33971243, 0.80398747], [-0.78125064, \n 1.50086563, 1.50086563, 1.50086563], [1.31984534, 1.31984534, \n 0.20741192, -0.40776015], [0.44932938, 1.49148118, 1.49148118, \n 1.49148118]], [[-0.10255169, -0.10255169, 0.73922913, 0.73922913], [\n 0.69341935, 0.96263152, 0.96263152, 0.96263152], [1.504032, 1.84423001,\n 1.84423001, 1.84423001], [2.20517719, 2.20517719, 0.85677771, \n 0.85677771]], [[0.51370298, 0.51370298, 0.51370298, -0.05085382], [\n 1.17065374, 1.17065374, 1.13857674, 0.44353707], [-0.48031445, \n 0.41807536, 0.41807536, 0.41807536], [0.5798125, 0.5798125, 1.96100924,\n 1.96100924]]])\n', (7246, 7899), True, 'import numpy as np\n'), ((8176, 8368), 'numpy.array', 'np.array', (['[[[1, 1, 1, 3], [1, 2, 2, 2], [0, 0, 1, 3], [1, 2, 2, 2]], [[1, 1, 3, 3], [\n 1, 2, 2, 2], [1, 2, 2, 2], [0, 0, 2, 2]], [[1, 1, 1, 3], [0, 0, 1, 3],\n [1, 2, 2, 2], [1, 1, 3, 3]]]'], {}), '([[[1, 1, 1, 3], [1, 2, 2, 2], [0, 0, 1, 3], [1, 2, 2, 2]], [[1, 1,\n 3, 3], [1, 2, 2, 2], [1, 2, 2, 2], [0, 0, 2, 2]], [[1, 1, 1, 3], [0, 0,\n 1, 3], [1, 2, 2, 2], [1, 1, 3, 3]]])\n', (8184, 8368), True, 'import numpy as np\n'), ((8434, 8727), 'numpy.array', 'np.array', (['[[[0.0, 3.0, 0.0, 1.0], [0.0, 1.0, 3.0, 0.0], [2.0, 1.0, 0.0, 1.0], [0.0, \n 1.0, 
3.0, 0.0]], [[0.0, 2.0, 0.0, 2.0], [0.0, 1.0, 3.0, 0.0], [0.0, 1.0,\n 3.0, 0.0], [2.0, 0.0, 2.0, 0.0]], [[0.0, 3.0, 0.0, 1.0], [2.0, 1.0, 0.0,\n 1.0], [0.0, 1.0, 3.0, 0.0], [0.0, 2.0, 0.0, 2.0]]]'], {}), '([[[0.0, 3.0, 0.0, 1.0], [0.0, 1.0, 3.0, 0.0], [2.0, 1.0, 0.0, 1.0],\n [0.0, 1.0, 3.0, 0.0]], [[0.0, 2.0, 0.0, 2.0], [0.0, 1.0, 3.0, 0.0], [\n 0.0, 1.0, 3.0, 0.0], [2.0, 0.0, 2.0, 0.0]], [[0.0, 3.0, 0.0, 1.0], [2.0,\n 1.0, 0.0, 1.0], [0.0, 1.0, 3.0, 0.0], [0.0, 2.0, 0.0, 2.0]]])\n', (8442, 8727), True, 'import numpy as np\n'), ((9022, 9121), 'oneflow.experimental.nn.MaxPool1d', 'flow.nn.MaxPool1d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'return_indices': '(True)'}), '(kernel_size=kernel_size, stride=stride, padding=padding,\n return_indices=True)\n', (9039, 9121), True, 'import oneflow.experimental as flow\n'), ((9751, 9804), 'oneflow.experimental.nn.MaxPool1d', 'flow.nn.MaxPool1d', ([], {'kernel_size': '(3)', 'stride': '(3)', 'padding': '(0)'}), '(kernel_size=3, stride=3, padding=0)\n', (9768, 9804), True, 'import oneflow.experimental as flow\n'), ((9960, 9986), 'numpy.squeeze', 'np.squeeze', (['np_out'], {'axis': '(3)'}), '(np_out, axis=3)\n', (9970, 9986), True, 'import numpy as np\n'), ((10132, 10159), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (10147, 10159), True, 'import numpy as np\n'), ((10328, 10427), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'return_indices': '(True)'}), '(kernel_size=kernel_size, stride=stride, padding=padding,\n return_indices=True)\n', (10345, 10427), True, 'import oneflow.experimental as flow\n'), ((10763, 12118), 'numpy.array', 'np.array', (['[[[[-0.89042996, 2.33971243, -0.86660827, 0.80398747], [-1.46769364, -\n 0.78125064, 1.50086563, -0.76278226], [1.31984534, 0.20741192, -\n 0.86507054, -0.40776015], [-0.89910823, 0.44932938, 1.49148118, -\n 0.22036761]], [[-0.5452334, -0.10255169, -1.42035108, 0.73922913], [-\n 0.03192764, 0.69341935, 0.96263152, -1.52070843], [0.02058239, 1.504032,\n 1.84423001, -0.0130596], [2.20517719, 0.38449598, 0.85677771, \n 0.60425179]], [[-1.64366213, 0.51370298, -0.21754866, -0.05085382], [\n 1.17065374, 1.13857674, -1.13070507, 0.44353707], [-1.30783846, -\n 0.48031445, 0.41807536, -2.13778887], [0.08259005, 0.5798125, \n 0.03024696, 1.96100924]]], [[[0.45173843, -0.34680027, -0.99754943, \n 0.18539502], [-0.68451047, -0.03217399, 0.44705642, -0.39016231], [-\n 0.18062337, 1.82099303, -0.19113869, 0.85298683], [0.14080452, \n 0.15306701, -1.02466827, -0.34480665]], [[-0.21048489, 0.20933038, -\n 0.09206508, -1.80402519], [-0.52028985, 0.01140166, -1.13452858, \n 0.96648332], [0.26454393, 0.48343972, -1.84055509, -0.01256443], [\n 0.31024029, 0.11983007, 0.98806488, 0.93557438]], [[0.39152445, \n 0.672159, 0.71289289, -0.68072016], [0.33711062, -1.78106242, \n 0.34545201, -1.62029359], [0.47343899, -2.3433269, -0.44517497, \n 0.09004267], [0.26310742, -1.53121271, 0.65028836, 1.3669488]]]]'], {}), '([[[[-0.89042996, 2.33971243, -0.86660827, 0.80398747], [-\n 1.46769364, -0.78125064, 1.50086563, -0.76278226], [1.31984534, \n 0.20741192, -0.86507054, -0.40776015], [-0.89910823, 0.44932938, \n 1.49148118, -0.22036761]], [[-0.5452334, -0.10255169, -1.42035108, \n 0.73922913], [-0.03192764, 0.69341935, 0.96263152, -1.52070843], [\n 0.02058239, 1.504032, 1.84423001, -0.0130596], [2.20517719, 0.38449598,\n 0.85677771, 0.60425179]], [[-1.64366213, 
0.51370298, -0.21754866, -\n 0.05085382], [1.17065374, 1.13857674, -1.13070507, 0.44353707], [-\n 1.30783846, -0.48031445, 0.41807536, -2.13778887], [0.08259005, \n 0.5798125, 0.03024696, 1.96100924]]], [[[0.45173843, -0.34680027, -\n 0.99754943, 0.18539502], [-0.68451047, -0.03217399, 0.44705642, -\n 0.39016231], [-0.18062337, 1.82099303, -0.19113869, 0.85298683], [\n 0.14080452, 0.15306701, -1.02466827, -0.34480665]], [[-0.21048489, \n 0.20933038, -0.09206508, -1.80402519], [-0.52028985, 0.01140166, -\n 1.13452858, 0.96648332], [0.26454393, 0.48343972, -1.84055509, -\n 0.01256443], [0.31024029, 0.11983007, 0.98806488, 0.93557438]], [[\n 0.39152445, 0.672159, 0.71289289, -0.68072016], [0.33711062, -\n 1.78106242, 0.34545201, -1.62029359], [0.47343899, -2.3433269, -\n 0.44517497, 0.09004267], [0.26310742, -1.53121271, 0.65028836, \n 1.3669488]]]])\n', (10771, 12118), True, 'import numpy as np\n'), ((12815, 13571), 'numpy.array', 'np.array', (['[[[[2.33971243, 2.33971243, 0.80398747], [1.31984534, 1.50086563, -\n 0.22036761], [0.44932938, 1.49148118, -0.22036761]], [[0.69341935, \n 0.96263152, 0.73922913], [2.20517719, 1.84423001, 0.60425179], [\n 2.20517719, 0.85677771, 0.60425179]], [[1.17065374, 1.13857674, \n 0.44353707], [1.17065374, 1.96100924, 1.96100924], [0.5798125, \n 1.96100924, 1.96100924]]], [[[0.45173843, 0.44705642, 0.18539502], [\n 1.82099303, 1.82099303, 0.85298683], [0.15306701, 0.15306701, -\n 0.34480665]], [[0.20933038, 0.96648332, 0.96648332], [0.48343972, \n 0.98806488, 0.96648332], [0.31024029, 0.98806488, 0.93557438]], [[\n 0.672159, 0.71289289, -0.68072016], [0.47343899, 1.3669488, 1.3669488],\n [0.26310742, 1.3669488, 1.3669488]]]]'], {}), '([[[[2.33971243, 2.33971243, 0.80398747], [1.31984534, 1.50086563, \n -0.22036761], [0.44932938, 1.49148118, -0.22036761]], [[0.69341935, \n 0.96263152, 0.73922913], [2.20517719, 1.84423001, 0.60425179], [\n 2.20517719, 0.85677771, 0.60425179]], [[1.17065374, 1.13857674, \n 0.44353707], [1.17065374, 1.96100924, 1.96100924], [0.5798125, \n 1.96100924, 1.96100924]]], [[[0.45173843, 0.44705642, 0.18539502], [\n 1.82099303, 1.82099303, 0.85298683], [0.15306701, 0.15306701, -\n 0.34480665]], [[0.20933038, 0.96648332, 0.96648332], [0.48343972, \n 0.98806488, 0.96648332], [0.31024029, 0.98806488, 0.93557438]], [[\n 0.672159, 0.71289289, -0.68072016], [0.47343899, 1.3669488, 1.3669488],\n [0.26310742, 1.3669488, 1.3669488]]]])\n', (12823, 13571), True, 'import numpy as np\n'), ((14341, 14436), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'ceil_mode': '(False)'}), '(kernel_size=kernel_size, stride=stride, padding=padding,\n ceil_mode=False)\n', (14358, 14436), True, 'import oneflow.experimental as flow\n'), ((14456, 14550), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'ceil_mode': '(True)'}), '(kernel_size=kernel_size, stride=stride, padding=padding,\n ceil_mode=True)\n', (14473, 14550), True, 'import oneflow.experimental as flow\n'), ((14975, 15002), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', '(6)', '(6)'], {}), '(1, 1, 6, 6)\n', (14990, 15002), True, 'import numpy as np\n'), ((15171, 15245), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (15188, 15245), True, 'import oneflow.experimental as 
flow\n'), ((15522, 15551), 'numpy.random.randn', 'np.random.randn', (['(9)', '(7)', '(32)', '(20)'], {}), '(9, 7, 32, 20)\n', (15537, 15551), True, 'import numpy as np\n'), ((15720, 15794), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (15737, 15794), True, 'import oneflow.experimental as flow\n'), ((16286, 16360), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (16303, 16360), True, 'import oneflow.experimental as flow\n'), ((16627, 16654), 'numpy.random.randn', 'np.random.randn', (['(6)', '(4)', '(7)', '(9)'], {}), '(6, 4, 7, 9)\n', (16642, 16654), True, 'import numpy as np\n'), ((16823, 16897), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (16840, 16897), True, 'import oneflow.experimental as flow\n'), ((17088, 17132), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (17100, 17132), True, 'import numpy as np\n'), ((17353, 17380), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', '(6)', '(6)'], {}), '(1, 1, 6, 6)\n', (17368, 17380), True, 'import numpy as np\n'), ((17549, 17623), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (17566, 17623), True, 'import oneflow.experimental as flow\n'), ((17814, 17858), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (17826, 17858), True, 'import numpy as np\n'), ((18078, 18107), 'numpy.random.randn', 'np.random.randn', (['(9)', '(7)', '(32)', '(20)'], {}), '(9, 7, 32, 20)\n', (18093, 18107), True, 'import numpy as np\n'), ((18276, 18350), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (18293, 18350), True, 'import oneflow.experimental as flow\n'), ((18541, 18585), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (18553, 18585), True, 'import numpy as np\n'), ((19020, 19094), 'oneflow.experimental.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (19037, 19094), True, 'import oneflow.experimental as flow\n'), ((19285, 19329), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (19297, 19329), True, 'import numpy as np\n'), ((19521, 19552), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(7)', '(9)', '(13)'], {}), '(2, 3, 7, 9, 13)\n', (19536, 19552), True, 'import numpy as np\n'), ((19730, 19804), 'oneflow.experimental.nn.MaxPool3d', 'flow.nn.MaxPool3d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (19747, 19804), True, 'import oneflow.experimental as flow\n'), ((20071, 20101), 
'numpy.random.randn', 'np.random.randn', (['(6)', '(4)', '(8)', '(7)', '(9)'], {}), '(6, 4, 8, 7, 9)\n', (20086, 20101), True, 'import numpy as np\n'), ((20279, 20353), 'oneflow.experimental.nn.MaxPool3d', 'flow.nn.MaxPool3d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (20296, 20353), True, 'import oneflow.experimental as flow\n'), ((20624, 20668), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (20636, 20668), True, 'import numpy as np\n'), ((20889, 20919), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', '(6)', '(6)', '(6)'], {}), '(1, 1, 6, 6, 6)\n', (20904, 20919), True, 'import numpy as np\n'), ((21097, 21171), 'oneflow.experimental.nn.MaxPool3d', 'flow.nn.MaxPool3d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (21114, 21171), True, 'import oneflow.experimental as flow\n'), ((21442, 21486), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (21454, 21486), True, 'import numpy as np\n'), ((21706, 21739), 'numpy.random.randn', 'np.random.randn', (['(9)', '(7)', '(48)', '(32)', '(20)'], {}), '(9, 7, 48, 32, 20)\n', (21721, 21739), True, 'import numpy as np\n'), ((21917, 21991), 'oneflow.experimental.nn.MaxPool3d', 'flow.nn.MaxPool3d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(kernel_size=kernel_size, stride=stride, padding=padding)\n', (21934, 21991), True, 'import oneflow.experimental as flow\n'), ((22262, 22306), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (22274, 22306), True, 'import numpy as np\n'), ((22753, 22812), 'oneflow.experimental.nn.MaxPool3d', 'flow.nn.MaxPool3d', ([], {'kernel_size': 'kernel_size', 'padding': 'padding'}), '(kernel_size=kernel_size, padding=padding)\n', (22770, 22812), True, 'import oneflow.experimental as flow\n'), ((23083, 23127), 'numpy.ones_like', 'np.ones_like', (['numpy_output'], {'dtype': 'np.float64'}), '(numpy_output, dtype=np.float64)\n', (23095, 23127), True, 'import numpy as np\n'), ((24766, 24781), 'unittest.main', 'unittest.main', ([], {}), '()\n', (24779, 24781), False, 'import unittest\n'), ((2322, 2524), 'numpy.pad', 'np.pad', (['x', '((0, 0), (0, 0), (self.padding[0], self.padding[0]), (self.padding[1], self\n .padding[1]), (self.padding[2], self.padding[2]))', '"""constant"""'], {'constant_values': '(self.min_val, self.min_val)'}), "(x, ((0, 0), (0, 0), (self.padding[0], self.padding[0]), (self.\n padding[1], self.padding[1]), (self.padding[2], self.padding[2])),\n 'constant', constant_values=(self.min_val, self.min_val))\n", (2328, 2524), True, 'import numpy as np\n'), ((3388, 3496), 'numpy.zeros', 'np.zeros', (['(self.in_batch, self.in_channel, self.pad_out_depth, self.pad_out_height,\n self.pad_out_width)'], {}), '((self.in_batch, self.in_channel, self.pad_out_depth, self.\n pad_out_height, self.pad_out_width))\n', (3396, 3496), True, 'import numpy as np\n'), ((3632, 3666), 'numpy.zeros_like', 'np.zeros_like', (['out'], {'dtype': 'np.int32'}), '(out, dtype=np.int32)\n', (3645, 3666), True, 'import numpy as np\n'), ((4897, 4922), 'numpy.zeros_like', 'np.zeros_like', (['self.pad_x'], {}), '(self.pad_x)\n', (4910, 4922), True, 'import numpy as np\n'), ((9141, 9160), 
'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (9152, 9160), True, 'import oneflow.experimental as flow\n'), ((10447, 10466), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (10458, 10466), True, 'import oneflow.experimental as flow\n'), ((14571, 14590), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (14582, 14590), True, 'import oneflow.experimental as flow\n'), ((14602, 14621), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (14613, 14621), True, 'import oneflow.experimental as flow\n'), ((15255, 15274), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (15266, 15274), True, 'import oneflow.experimental as flow\n'), ((15804, 15823), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (15815, 15823), True, 'import oneflow.experimental as flow\n'), ((16078, 16117), 'numpy.ones', 'np.ones', (['(1, 1, 1, 1)'], {'dtype': 'np.float32'}), '((1, 1, 1, 1), dtype=np.float32)\n', (16085, 16117), True, 'import numpy as np\n'), ((16370, 16389), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (16381, 16389), True, 'import oneflow.experimental as flow\n'), ((16907, 16926), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (16918, 16926), True, 'import oneflow.experimental as flow\n'), ((17633, 17652), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (17644, 17652), True, 'import oneflow.experimental as flow\n'), ((18360, 18379), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (18371, 18379), True, 'import oneflow.experimental as flow\n'), ((18812, 18851), 'numpy.ones', 'np.ones', (['(1, 1, 1, 1)'], {'dtype': 'np.float32'}), '((1, 1, 1, 1), dtype=np.float32)\n', (18819, 18851), True, 'import numpy as np\n'), ((19104, 19123), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (19115, 19123), True, 'import oneflow.experimental as flow\n'), ((19814, 19833), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (19825, 19833), True, 'import oneflow.experimental as flow\n'), ((20363, 20382), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (20374, 20382), True, 'import oneflow.experimental as flow\n'), ((21181, 21200), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (21192, 21200), True, 'import oneflow.experimental as flow\n'), ((22001, 22020), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (22012, 22020), True, 'import oneflow.experimental as flow\n'), ((22533, 22575), 'numpy.ones', 'np.ones', (['(1, 1, 1, 1, 1)'], {'dtype': 'np.float32'}), '((1, 1, 1, 1, 1), dtype=np.float32)\n', (22540, 22575), True, 'import numpy as np\n'), ((22822, 22841), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (22833, 22841), True, 'import oneflow.experimental as flow\n'), ((23382, 23395), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23393, 23395), False, 'from collections import OrderedDict\n'), ((23544, 23564), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (23554, 23564), False, 'from test_util import GenArgList\n'), ((23661, 23674), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23672, 23674), False, 'from collections import OrderedDict\n'), ((24198, 24218), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], 
{}), '(arg_dict)\n', (24208, 24218), False, 'from test_util import GenArgList\n'), ((24315, 24328), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24326, 24328), False, 'from collections import OrderedDict\n'), ((24671, 24691), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (24681, 24691), False, 'from test_util import GenArgList\n'), ((1903, 1923), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (1911, 1923), True, 'import numpy as np\n'), ((2131, 2142), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (2139, 2142), True, 'import numpy as np\n'), ((2172, 2183), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (2180, 2183), True, 'import numpy as np\n'), ((2211, 2222), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (2219, 2222), True, 'import numpy as np\n'), ((2251, 2262), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (2259, 2262), True, 'import numpy as np\n'), ((2290, 2301), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (2298, 2301), True, 'import numpy as np\n'), ((3036, 3102), 'math.ceil', 'math.ceil', (['((self.pad_shape[2] - self.w_depth + 1) / self.stride[0])'], {}), '((self.pad_shape[2] - self.w_depth + 1) / self.stride[0])\n', (3045, 3102), False, 'import math\n'), ((3166, 3233), 'math.ceil', 'math.ceil', (['((self.pad_shape[3] - self.w_height + 1) / self.stride[1])'], {}), '((self.pad_shape[3] - self.w_height + 1) / self.stride[1])\n', (3175, 3233), False, 'import math\n'), ((3296, 3362), 'math.ceil', 'math.ceil', (['((self.pad_shape[4] - self.w_width + 1) / self.stride[2])'], {}), '((self.pad_shape[4] - self.w_width + 1) / self.stride[2])\n', (3305, 3362), False, 'import math\n'), ((9200, 9219), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (9211, 9219), True, 'import oneflow.experimental as flow\n'), ((9721, 9740), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (9732, 9740), True, 'import oneflow.experimental as flow\n'), ((10506, 10525), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (10517, 10525), True, 'import oneflow.experimental as flow\n'), ((14661, 14680), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (14672, 14680), True, 'import oneflow.experimental as flow\n'), ((15314, 15333), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (15325, 15333), True, 'import oneflow.experimental as flow\n'), ((15863, 15882), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (15874, 15882), True, 'import oneflow.experimental as flow\n'), ((16429, 16448), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (16440, 16448), True, 'import oneflow.experimental as flow\n'), ((16986, 17005), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (16997, 17005), True, 'import oneflow.experimental as flow\n'), ((17712, 17731), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (17723, 17731), True, 'import oneflow.experimental as flow\n'), ((18439, 18458), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (18450, 18458), True, 'import oneflow.experimental as flow\n'), ((19183, 19202), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (19194, 19202), True, 'import oneflow.experimental as flow\n'), ((19873, 19892), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (19884, 19892), True, 'import 
oneflow.experimental as flow\n'), ((20442, 20461), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (20453, 20461), True, 'import oneflow.experimental as flow\n'), ((21260, 21279), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (21271, 21279), True, 'import oneflow.experimental as flow\n'), ((22080, 22099), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (22091, 22099), True, 'import oneflow.experimental as flow\n'), ((22901, 22920), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (22912, 22920), True, 'import oneflow.experimental as flow\n'), ((9614, 9629), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (9623, 9629), True, 'import numpy as np\n'), ((4318, 4382), 'numpy.max', 'np.max', (['pad_x[n, c, start_i:end_i, start_j:end_j, start_k:end_k]'], {}), '(pad_x[n, c, start_i:end_i, start_j:end_j, start_k:end_k])\n', (4324, 4382), True, 'import numpy as np\n'), ((4503, 4570), 'numpy.argmax', 'np.argmax', (['pad_x[n, c, start_i:end_i, start_j:end_j, start_k:end_k]'], {}), '(pad_x[n, c, start_i:end_i, start_j:end_j, start_k:end_k])\n', (4512, 4570), True, 'import numpy as np\n'), ((5561, 5624), 'numpy.unravel_index', 'np.unravel_index', (['self.arg_max[n, c, i, j, k]', 'self.kernel_size'], {}), '(self.arg_max[n, c, i, j, k], self.kernel_size)\n', (5577, 5624), True, 'import numpy as np\n')]
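The MaxPoolNumpy class in the test file above doubles as a standalone pooling reference. A minimal self-check of its forward and backward shapes, assuming the class is in scope and using illustrative sizes that are not taken from the tests:

import numpy as np

# With kernel 3, stride 1, padding 1, an 8x8 feature map keeps its spatial size.
pool = MaxPoolNumpy(dim=2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
x = np.random.randn(2, 3, 8, 8)
y = pool(x)
assert y.shape == (2, 3, 8, 8)
# Backward routes an all-ones upstream gradient to each window's argmax.
dx = pool.backward(np.ones_like(y))
assert dx.shape == x.shape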
import oneflow as flow

from flowvision.data import Mixup


def test_mixup(x, target, switch_prob=0.5, mode="batch"):
    # With both alphas > 0, switch_prob is the probability of applying CutMix
    # instead of Mixup on a given call; mode selects whether mixing parameters
    # are drawn per element, per pair of samples, or once per batch.
    mixup = Mixup(
        mixup_alpha=1.0,
        cutmix_alpha=1.0,
        switch_prob=switch_prob,
        label_smoothing=0.0,
        mode=mode,
    )
    x, target = mixup(x, target)
    return x, target


if __name__ == "__main__":
    x = flow.randn(16, 3, 224, 224).cuda()
    target = flow.arange(0, 16).cuda()
    test_mixup(x, target, mode="elem")
    test_mixup(x, target, mode="pair")
    test_mixup(x, target, mode="batch")
[ "oneflow.arange", "oneflow.randn" ]
[((130, 231), 'flowvision.data.Mixup', 'Mixup', ([], {'mixup_alpha': '(1.0)', 'cutmix_alpha': '(1.0)', 'switch_prob': 'switch_prob', 'label_smoothing': '(0.0)', 'mode': 'mode'}), '(mixup_alpha=1.0, cutmix_alpha=1.0, switch_prob=switch_prob,\n label_smoothing=0.0, mode=mode)\n', (135, 231), False, 'from flowvision.data import Mixup\n'), ((366, 393), 'oneflow.randn', 'flow.randn', (['(16)', '(3)', '(224)', '(224)'], {}), '(16, 3, 224, 224)\n', (376, 393), True, 'import oneflow as flow\n'), ((414, 432), 'oneflow.arange', 'flow.arange', (['(0)', '(16)'], {}), '(0, 16)\n', (425, 432), True, 'import oneflow as flow\n')]
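For context on the snippet above: flowvision's Mixup (which mirrors the timm API) also expands integer class labels into soft-label distributions of shape (batch, num_classes). A small illustrative sketch; the num_classes value below is the API's documented default, written out explicitly here:

import oneflow as flow
from flowvision.data import Mixup

mixup = Mixup(mixup_alpha=1.0, cutmix_alpha=1.0, num_classes=1000)
x = flow.randn(4, 3, 224, 224)
target = flow.arange(0, 4)
x_mixed, soft_target = mixup(x, target)
print(soft_target.shape)  # expected (4, 1000): one mixed label distribution per sample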
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import re import oneflow.core.operator.op_conf_pb2 as op_conf_util import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util import oneflow.python.framework.c_api_util as c_api_util import oneflow.python.framework.compile_context as compile_context import oneflow.python.framework.id_util as id_util import oneflow.python.framework.input_blob_def as input_blob_util import oneflow.python.framework.remote_blob as remote_blob_util import oneflow.python.framework.hob as hob import oneflow.python.lib.core.enable_if as enable_if import oneflow.python.framework.session_context as session_ctx import oneflow.python.framework.scope_util as scope_util import oneflow.python.eager.boxing_util as boxing_util import oneflow.python.eager.blob_register as blob_register_util import oneflow._oneflow_internal.oneflow.core.job.placement as placement_cfg import oneflow._oneflow_internal blob_register = oneflow._oneflow_internal.GetDefaultBlobRegister() def InputOpByArgBlobDef(blob_def): assert isinstance(blob_def, input_blob_util.ArgBlobDef) op_conf = op_conf_util.OperatorConf() op_conf.name = blob_def.op_name op_conf.input_conf.out = blob_def.blob_name op_conf.input_conf.blob_conf.CopyFrom(blob_def.ToInterfaceBlobConf()) blob_def.AddAndInferOp(op_conf) lbi = logical_blob_id_util.LogicalBlobId() lbi.op_name = blob_def.op_name lbi.blob_name = blob_def.blob_name return remote_blob_util.RemoteBlob(lbi) def ReturnRemoteBlob(remote_blob, allow_cpu_return_op=True): return enable_if.unique([LazyReturnRemoteBlob, EagerReturnRemoteBlob])( remote_blob, allow_cpu_return_op ) @enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled) def LazyReturnRemoteBlob(remote_blob, allow_cpu_return_op=True): assert isinstance( remote_blob, ( oneflow._oneflow_internal.LazyMirroredBlob, oneflow._oneflow_internal.LazyConsistentBlob, ), ) op_conf, lbi, scope = _GetReturnOpConfAndOutLbiAndScope( remote_blob, allow_cpu_return_op ) compile_context.CurJobAddOp(op_conf, scope) return remote_blob_util.RemoteBlob(lbi) @enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled) def EagerReturnRemoteBlob(remote_blob, allow_cpu_return_op=True): if not hob.is_trainable(None): return remote_blob op_conf, lbi, scope = _GetReturnOpConfAndOutLbiAndScope( remote_blob, allow_cpu_return_op ) if remote_blob.blob_object.op_arg_parallel_attr.is_mirrored(): add_and_infer = compile_context.CurJobAddMirroredOp else: add_and_infer = compile_context.CurJobAddConsistentOp op_attribute = add_and_infer(op_conf, scope) def BuildInstruction(builder): get_blob_scope = blob_register_util.BnInOp2BlobObjectScope with get_blob_scope(blob_register, op_attribute) as bn_in_op2blob_object: cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString( str(op_attribute) ) builder.StatelessCall( cfg_op_attribute, remote_blob.blob_object.parallel_desc_symbol.parallel_conf, bn_in_op2blob_object, boxing_util.BoxingTo, 
) oneflow._oneflow_internal.deprecated.LogicalRun(BuildInstruction) return remote_blob_util.RemoteBlob(lbi) def _GetReturnOpConfAndOutLbiAndScope(remote_blob, allow_cpu_return_op=True): op_conf = op_conf_util.OperatorConf() op_conf.name = id_util.UniqueStr("Return_") setattr(op_conf.return_conf, "in", remote_blob.unique_name) op_conf.return_conf.out = "out" if allow_cpu_return_op: op_conf.device_tag = "cpu" lbi = logical_blob_id_util.LogicalBlobId() lbi.op_name = op_conf.name lbi.blob_name = "out" parallel_conf = placement_cfg.ParallelConf() parallel_conf.CopyFrom(remote_blob.parallel_conf) def BuildScope(old_scope, builder): return builder.BuildScopeWithNewParallelConf(old_scope, parallel_conf) sess = session_ctx.GetDefaultSession() scope = scope_util.MakeScope(BuildScope) return op_conf, lbi, scope
[ "oneflow.python.framework.scope_util.MakeScope", "oneflow.python.framework.hob.is_trainable", "oneflow.core.operator.op_conf_pb2.OperatorConf", "oneflow.python.framework.remote_blob.RemoteBlob", "oneflow.python.framework.id_util.UniqueStr", "oneflow.python.lib.core.enable_if.condition", "oneflow.python.lib.core.enable_if.unique", "oneflow.python.framework.session_context.GetDefaultSession", "oneflow.core.register.logical_blob_id_pb2.LogicalBlobId", "oneflow.python.framework.compile_context.CurJobAddOp", "oneflow._oneflow_internal.oneflow.core.job.placement.ParallelConf" ]
[((2277, 2347), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (2296, 2347), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2801, 2870), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (2820, 2870), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((1701, 1728), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (1726, 1728), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((1933, 1969), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (1967, 1969), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((2055, 2087), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (2082, 2087), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((2710, 2753), 'oneflow.python.framework.compile_context.CurJobAddOp', 'compile_context.CurJobAddOp', (['op_conf', 'scope'], {}), '(op_conf, scope)\n', (2737, 2753), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((2765, 2797), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (2792, 2797), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((3998, 4030), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (4025, 4030), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((4125, 4152), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (4150, 4152), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((4172, 4200), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Return_"""'], {}), "('Return_')\n", (4189, 4200), True, 'import oneflow.python.framework.id_util as id_util\n'), ((4375, 4411), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (4409, 4411), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((4490, 4518), 'oneflow._oneflow_internal.oneflow.core.job.placement.ParallelConf', 'placement_cfg.ParallelConf', ([], {}), '()\n', (4516, 4518), True, 'import oneflow._oneflow_internal.oneflow.core.job.placement as placement_cfg\n'), ((4705, 4736), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4734, 4736), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((4749, 4781), 'oneflow.python.framework.scope_util.MakeScope', 'scope_util.MakeScope', (['BuildScope'], {}), '(BuildScope)\n', (4769, 4781), True, 'import oneflow.python.framework.scope_util as scope_util\n'), ((2162, 2225), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[LazyReturnRemoteBlob, EagerReturnRemoteBlob]'], {}), '([LazyReturnRemoteBlob, EagerReturnRemoteBlob])\n', (2178, 2225), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2948, 2970), 'oneflow.python.framework.hob.is_trainable', 'hob.is_trainable', (['None'], {}), '(None)\n', (2964, 
2970), True, 'import oneflow.python.framework.hob as hob\n')]
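The condition/unique pair in the record above implements a small dispatch scheme: each candidate implementation carries a boolean predicate (a hob condition), and exactly one enabled candidate is selected when the public entry point is invoked. A toy re-creation of that idea, illustrative only and not OneFlow's actual enable_if implementation:

eager_mode = False

def condition(pred):
    # Attach an enabling predicate to an implementation.
    def deco(fn):
        fn._pred = pred
        return fn
    return deco

def unique(candidates):
    # Pick the single implementation whose predicate currently holds.
    matched = [f for f in candidates if f._pred()]
    assert len(matched) == 1, "exactly one implementation must be enabled"
    return matched[0]

@condition(lambda: not eager_mode)
def lazy_return(blob):
    return f"lazy({blob})"

@condition(lambda: eager_mode)
def eager_return(blob):
    return f"eager({blob})"

print(unique([lazy_return, eager_return])("out"))  # prints: lazy(out)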
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import numpy as np import oneflow as flow import torch as ori_torch import oneflow.unittest from collections import OrderedDict from oneflow.test_utils.automated_test_util import * from oneflow.test_utils.test_util import GenArgList from oneflow.nn.common_types import _size_1_t, _size_2_t, _size_3_t @autotest(n=1, check_graph=False) def _test_maxpool1d_functional(test_case, placement, sbp): return_indices = random().to(bool).value() dim0 = random().to(int).value() * 8 dim1 = random().to(int).value() * 8 x = random_tensor(ndim=3, dim0=dim0, dim1=dim1, dim2=random(20, 22)).to_global( placement, sbp ) y = torch.nn.functional.max_pool1d( x, kernel_size=random(4, 6).to(int), stride=random(1, 3).to(int), padding=random(1, 3).to(int), dilation=random(2, 4).to(int), ceil_mode=random().to(bool), return_indices=return_indices, ) if return_indices: return y[0] else: return y @autotest(n=1, check_graph=False) def _test_maxpool2d_functional(test_case, placement, sbp): return_indices = random().to(bool).value() dim0 = random().to(int).value() * 8 dim1 = random().to(int).value() * 8 x = random_tensor( ndim=4, dim0=dim0, dim1=dim1, dim2=random(20, 22), dim3=random(20, 22) ).to_global(placement, sbp) y = torch.nn.functional.max_pool2d( x, kernel_size=random(4, 6).to(int), stride=random(1, 3).to(int), padding=random(1, 3).to(int), dilation=random(2, 4).to(int), ceil_mode=random().to(bool), return_indices=return_indices, ) if return_indices: return y[0] else: return y @autotest(n=1, check_graph=False) def _test_maxpool3d_functional(test_case, placement, sbp): return_indices = random().to(bool).value() dim0 = random().to(int).value() * 8 dim1 = random().to(int).value() * 8 x = random_tensor( ndim=5, dim0=dim0, dim1=dim1, dim2=random(20, 22), dim3=random(20, 22), dim4=random(20, 22), ).to_global(placement, sbp) y = torch.nn.functional.max_pool3d( x, kernel_size=random(4, 6).to(int), stride=random(1, 3).to(int), padding=random(1, 3).to(int), dilation=random(2, 4).to(int), ceil_mode=random().to(bool), return_indices=return_indices, ) if return_indices: return y[0] else: return y @autotest(n=1, check_graph=False) def _test_maxpool1d(test_case, placement, sbp): return_indices = random().to(bool).value() dim0 = random().to(int).value() * 8 dim1 = random().to(int).value() * 8 m = torch.nn.MaxPool1d( kernel_size=random(4, 6).to(_size_1_t), stride=random(1, 3).to(_size_1_t), padding=random(1, 3).to(_size_1_t), dilation=random(2, 4).to(_size_1_t), ceil_mode=random(), return_indices=return_indices, ) m.train(random()) x = random_tensor(ndim=3, dim0=dim0, dim1=dim1, dim2=random(20, 22)).to_global( placement, sbp ) y = m(x) if return_indices: return y[0] else: return y @autotest(n=1, check_graph=False) def _test_maxpool2d(test_case, placement, sbp): return_indices = random().to(bool).value() dim0 = random(1, 3).to(int).value() * 8 dim1 = random(1, 3).to(int).value() * 8 m = torch.nn.MaxPool2d( kernel_size=random(4, 6).to(_size_2_t), 
stride=random(1, 3).to(_size_2_t), padding=random(1, 3).to(_size_2_t), dilation=random(2, 4).to(_size_2_t), ceil_mode=random(), return_indices=return_indices, ) m.train(random()) x = random_tensor( ndim=4, dim0=dim0, dim1=dim1, dim2=random(20, 22), dim3=random(20, 22) ).to_global(placement, sbp) y = m(x) if return_indices: return y[0] else: return y @autotest(n=1, check_graph=False) def _test_maxpool3d(test_case, placement, sbp): return_indices = random().to(bool).value() dim0 = random().to(int).value() * 8 dim1 = random().to(int).value() * 8 m = torch.nn.MaxPool3d( kernel_size=random(4, 6).to(_size_3_t), stride=random(1, 3).to(_size_3_t), padding=random(1, 3).to(_size_3_t), dilation=random(2, 4).to(_size_3_t), ceil_mode=random(), return_indices=return_indices, ) m.train(random()) x = random_tensor( ndim=5, dim0=dim0, dim1=dim1, dim2=random(20, 22), dim3=random(20, 22), dim4=random(20, 22), ).to_global(placement, sbp) y = m(x) if return_indices: return y[0] else: return y def _test_maxpool2d_channel_last( test_case, placement, sbp, shape, kernel_size, stride, padding, dilation, ceil_mode ): os.environ["ONEFLOW_ENABLE_NHWC"] = "1" tensor = random_tensor(len(shape), *shape, requires_grad=False).to_global( placement, sbp ) # oneflow result x1 = tensor.oneflow m1 = flow.nn.MaxPool2d( kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, ) y1 = m1(x1) # pytorch result x2 = tensor.pytorch.permute(0, 3, 1, 2).to(placement.type) m2 = ori_torch.nn.MaxPool2d( kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, ) y2 = m2(x2).permute(0, 2, 3, 1) test_case.assertTrue( np.allclose(y1.detach().cpu().numpy(), y2.detach().cpu().numpy(), 1e-4, 1e-4) ) os.environ["ONEFLOW_ENABLE_NHWC"] = "0" class TestMaxPool(flow.unittest.TestCase): @globaltest def test_maxpool(test_case): for placement in all_placement(): for sbp in all_sbp(placement, max_dim=2): _test_maxpool1d_functional(test_case, placement, sbp) _test_maxpool2d_functional(test_case, placement, sbp) _test_maxpool3d_functional(test_case, placement, sbp) _test_maxpool1d(test_case, placement, sbp) _test_maxpool2d(test_case, placement, sbp) _test_maxpool3d(test_case, placement, sbp) @globaltest def test_maxpool2d_channel_last(test_case): arg_dict = OrderedDict() arg_dict["test_fun"] = [_test_maxpool2d_channel_last] arg_dict["shape"] = [(1, 16, 16, 3), (2, 224, 224, 3)] arg_dict["kernel_size"] = [3, (2, 3)] arg_dict["stride"] = [1, (1, 2)] arg_dict["padding"] = [0, (0, 1)] arg_dict["dilation"] = [1, 2] arg_dict["ceil_mode"] = [True, False] for arg in GenArgList(arg_dict): for placement in all_placement(): for sbp in all_sbp(placement, valid_split_axis=[1, 2]): arg[0](test_case, placement, sbp, *arg[1:]) if __name__ == "__main__": unittest.main()
[ "oneflow.nn.MaxPool2d", "oneflow.test_utils.test_util.GenArgList" ]
[((5672, 5790), 'oneflow.nn.MaxPool2d', 'flow.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'ceil_mode': 'ceil_mode'}), '(kernel_size=kernel_size, stride=stride, padding=padding,\n dilation=dilation, ceil_mode=ceil_mode)\n', (5689, 5790), True, 'import oneflow as flow\n'), ((5944, 6068), 'torch.nn.MaxPool2d', 'ori_torch.nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'ceil_mode': 'ceil_mode'}), '(kernel_size=kernel_size, stride=stride, padding=\n padding, dilation=dilation, ceil_mode=ceil_mode)\n', (5966, 6068), True, 'import torch as ori_torch\n'), ((7579, 7594), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7592, 7594), False, 'import unittest\n'), ((6971, 6984), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6982, 6984), False, 'from collections import OrderedDict\n'), ((7342, 7362), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7352, 7362), False, 'from oneflow.test_utils.test_util import GenArgList\n')]
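A plainer, self-contained sketch of the oneflow.nn.MaxPool2d API that the record above exercises through the global-tensor autotest harness; the shapes and hyper-parameters here are illustrative assumptions, not values taken from the record.

import oneflow as flow

# 3x3 window, stride 2, one pixel of padding.
pool = flow.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

# NCHW input: batch 1, 3 channels, 32x32 spatial size.
x = flow.randn(1, 3, 32, 32)
y = pool(x)

# With these settings the spatial size halves.
print(y.shape)  # oneflow.Size([1, 3, 16, 16])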
import json
import os

import oneflow as flow
import oneflow.nn as nn

from models.CPT import CPT


class ClueAFQMCCPT(nn.Module):
    def __init__(self, pretrain_dir, num_labels, is_train):
        super(ClueAFQMCCPT, self).__init__()
        # read the pretrained model's hyper-parameters from parameters.json
        kwargs_path = os.path.join(pretrain_dir, "parameters.json")
        with open(kwargs_path, "r") as f:
            kwargs = json.load(f)
        model = CPT(**kwargs)
        if is_train:
            # only load the pretrained weights when fine-tuning
            model.load_state_dict(flow.load(os.path.join(pretrain_dir, "weights")))
        self.cpt = model
        self.classifier = nn.Linear(model.d_model, num_labels)

    def forward(self, inputs, masks):
        outputs = self.cpt(inputs, masks)
        # classify on the hidden state of the first token
        outputs = outputs[0][:, 0, :]
        outputs = self.classifier(outputs)
        return outputs
[ "oneflow.nn.Linear" ]
[((257, 302), 'os.path.join', 'os.path.join', (['pretrain_dir', '"""parameters.json"""'], {}), "(pretrain_dir, 'parameters.json')\n", (269, 302), False, 'import os\n'), ((395, 408), 'models.CPT.CPT', 'CPT', ([], {}), '(**kwargs)\n', (398, 408), False, 'from models.CPT import CPT\n'), ((573, 609), 'oneflow.nn.Linear', 'nn.Linear', (['model.d_model', 'num_labels'], {}), '(model.d_model, num_labels)\n', (582, 609), True, 'import oneflow.nn as nn\n'), ((366, 378), 'json.load', 'json.load', (['f'], {}), '(f)\n', (375, 378), False, 'import json\n'), ((482, 519), 'os.path.join', 'os.path.join', (['pretrain_dir', '"""weights"""'], {}), "(pretrain_dir, 'weights')\n", (494, 519), False, 'import os\n')]
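The one OneFlow API this record isolates is oneflow.nn.Linear, used as the classification head on top of CPT. A minimal sketch of the same pattern, with a hypothetical hidden size and label count standing in for model.d_model and num_labels:

import oneflow as flow
import oneflow.nn as nn

# Hypothetical dimensions: 768 hidden units, 2 labels (AFQMC is a binary task).
classifier = nn.Linear(768, 2)

# Pooled first-token features for a batch of 4 sequences.
features = flow.randn(4, 768)
logits = classifier(features)
print(logits.shape)  # oneflow.Size([4, 2])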
import itertools
import numpy as np
import os
import unittest
from collections import OrderedDict
from collections.abc import Iterable
from PIL import Image

import oneflow as flow
import flowvision.transforms as transforms
import flowvision.transforms.functional as F
import flowvision.transforms.functional_tensor as F_t


def GenCartesianProduct(sets):
    assert isinstance(sets, Iterable)
    for arg_set in sets:
        assert isinstance(arg_set, Iterable)
        if os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            # drop device entries that need a GPU when running CPU-only
            if "gpu" in arg_set:
                arg_set.remove("gpu")
            if "cuda" in arg_set:
                arg_set.remove("cuda")
    return itertools.product(*sets)


def GenArgList(arg_dict):
    assert isinstance(arg_dict, OrderedDict)
    assert all([isinstance(x, list) for x in arg_dict.values()])
    sets = [arg_set for (_, arg_set) in arg_dict.items()]
    return GenCartesianProduct(sets)


def _test_adjust_brightness(self):
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode="RGB")
    # test 0
    y_pil = F.adjust_brightness(x_pil, 1)
    y_np = np.array(y_pil)
    self.assertTrue(np.allclose(y_np, x_np))
    # test 1
    y_pil = F.adjust_brightness(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))
    # test 2
    y_pil = F.adjust_brightness(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))


def _test_adjust_contrast(self):
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode="RGB")
    # test 0
    y_pil = F.adjust_contrast(x_pil, 1)
    y_np = np.array(y_pil)
    self.assertTrue(np.allclose(y_np, x_np))
    # test 1
    y_pil = F.adjust_contrast(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))
    # test 2
    y_pil = F.adjust_contrast(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))


def _test_adjust_saturation(self):
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode="RGB")
    # test 0
    y_pil = F.adjust_saturation(x_pil, 1)
    y_np = np.array(y_pil)
    self.assertTrue(np.allclose(y_np, x_np))
    # test 1
    y_pil = F.adjust_saturation(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 216, 89]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))
    # test 2
    y_pil = F.adjust_saturation(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 3, 255, 0]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))


def _test_adjust_hue(self):
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode="RGB")
    with self.assertRaises(ValueError):
        F.adjust_hue(x_pil, -0.7)
        F.adjust_hue(x_pil, 1)
    # test 0
    y_pil = F.adjust_hue(x_pil, 0)
    y_np = np.array(y_pil)
    y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))
    # test 1
    y_pil = F.adjust_hue(x_pil, 0.25)
    y_np = np.array(y_pil)
    y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))
    # test 2
    y_pil = F.adjust_hue(x_pil, -0.25)
    y_np = np.array(y_pil)
    y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    self.assertTrue(np.allclose(y_np, y_ans))


def _test_randomness(fn, trans, seed, p):
    flow.manual_seed(seed)
    img = transforms.ToPILImage()(flow.rand(3, 16, 18))
    expected_transformed_img = fn(img)
    randomly_transformed_img = trans(p=p)(img)
    if p == 0:
        assert randomly_transformed_img == img
    elif p == 1:
        assert randomly_transformed_img == expected_transformed_img
    trans().__repr__()


class TestTransform(unittest.TestCase):
    def test_randomness(self):
        arg_dict = OrderedDict()
        arg_dict["trans_pair"] = [
            (F.vflip, transforms.RandomVerticalFlip),
            (F.hflip, transforms.RandomHorizontalFlip),
        ]
        arg_dict["seed"] = [*range(10)]
        arg_dict["p"] = [0, 1]
        for arg in GenArgList(arg_dict):
            _test_randomness(*arg[0], *arg[1:])

    def test_photometric_distort(self):
        _test_adjust_brightness(self)
        _test_adjust_contrast(self)
        _test_adjust_saturation(self)
        _test_adjust_hue(self)


if __name__ == "__main__":
    unittest.main()
[ "oneflow.rand", "oneflow.manual_seed" ]
[((642, 666), 'itertools.product', 'itertools.product', (['*sets'], {}), '(*sets)\n', (659, 666), False, 'import itertools\n'), ((1096, 1129), 'PIL.Image.fromarray', 'Image.fromarray', (['x_np'], {'mode': '"""RGB"""'}), "(x_np, mode='RGB')\n", (1111, 1129), False, 'from PIL import Image\n'), ((1156, 1185), 'flowvision.transforms.functional.adjust_brightness', 'F.adjust_brightness', (['x_pil', '(1)'], {}), '(x_pil, 1)\n', (1175, 1185), True, 'import flowvision.transforms.functional as F\n'), ((1197, 1212), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (1205, 1212), True, 'import numpy as np\n'), ((1284, 1315), 'flowvision.transforms.functional.adjust_brightness', 'F.adjust_brightness', (['x_pil', '(0.5)'], {}), '(x_pil, 0.5)\n', (1303, 1315), True, 'import flowvision.transforms.functional as F\n'), ((1327, 1342), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (1335, 1342), True, 'import numpy as np\n'), ((1535, 1564), 'flowvision.transforms.functional.adjust_brightness', 'F.adjust_brightness', (['x_pil', '(2)'], {}), '(x_pil, 2)\n', (1554, 1564), True, 'import flowvision.transforms.functional as F\n'), ((1576, 1591), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (1584, 1591), True, 'import numpy as np\n'), ((1958, 1991), 'PIL.Image.fromarray', 'Image.fromarray', (['x_np'], {'mode': '"""RGB"""'}), "(x_np, mode='RGB')\n", (1973, 1991), False, 'from PIL import Image\n'), ((2018, 2045), 'flowvision.transforms.functional.adjust_contrast', 'F.adjust_contrast', (['x_pil', '(1)'], {}), '(x_pil, 1)\n', (2035, 2045), True, 'import flowvision.transforms.functional as F\n'), ((2057, 2072), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (2065, 2072), True, 'import numpy as np\n'), ((2144, 2173), 'flowvision.transforms.functional.adjust_contrast', 'F.adjust_contrast', (['x_pil', '(0.5)'], {}), '(x_pil, 0.5)\n', (2161, 2173), True, 'import flowvision.transforms.functional as F\n'), ((2185, 2200), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (2193, 2200), True, 'import numpy as np\n'), ((2399, 2426), 'flowvision.transforms.functional.adjust_contrast', 'F.adjust_contrast', (['x_pil', '(2)'], {}), '(x_pil, 2)\n', (2416, 2426), True, 'import flowvision.transforms.functional as F\n'), ((2438, 2453), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (2446, 2453), True, 'import numpy as np\n'), ((2816, 2849), 'PIL.Image.fromarray', 'Image.fromarray', (['x_np'], {'mode': '"""RGB"""'}), "(x_np, mode='RGB')\n", (2831, 2849), False, 'from PIL import Image\n'), ((2876, 2905), 'flowvision.transforms.functional.adjust_saturation', 'F.adjust_saturation', (['x_pil', '(1)'], {}), '(x_pil, 1)\n', (2895, 2905), True, 'import flowvision.transforms.functional as F\n'), ((2917, 2932), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (2925, 2932), True, 'import numpy as np\n'), ((3004, 3035), 'flowvision.transforms.functional.adjust_saturation', 'F.adjust_saturation', (['x_pil', '(0.5)'], {}), '(x_pil, 0.5)\n', (3023, 3035), True, 'import flowvision.transforms.functional as F\n'), ((3047, 3062), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (3055, 3062), True, 'import numpy as np\n'), ((3259, 3288), 'flowvision.transforms.functional.adjust_saturation', 'F.adjust_saturation', (['x_pil', '(2)'], {}), '(x_pil, 2)\n', (3278, 3288), True, 'import flowvision.transforms.functional as F\n'), ((3300, 3315), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (3308, 3315), True, 'import numpy as np\n'), ((3671, 3704), 'PIL.Image.fromarray', 
'Image.fromarray', (['x_np'], {'mode': '"""RGB"""'}), "(x_np, mode='RGB')\n", (3686, 3704), False, 'from PIL import Image\n'), ((3837, 3859), 'flowvision.transforms.functional.adjust_hue', 'F.adjust_hue', (['x_pil', '(0)'], {}), '(x_pil, 0)\n', (3849, 3859), True, 'import flowvision.transforms.functional as F\n'), ((3871, 3886), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (3879, 3886), True, 'import numpy as np\n'), ((4081, 4106), 'flowvision.transforms.functional.adjust_hue', 'F.adjust_hue', (['x_pil', '(0.25)'], {}), '(x_pil, 0.25)\n', (4093, 4106), True, 'import flowvision.transforms.functional as F\n'), ((4118, 4133), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (4126, 4133), True, 'import numpy as np\n'), ((4330, 4356), 'flowvision.transforms.functional.adjust_hue', 'F.adjust_hue', (['x_pil', '(-0.25)'], {}), '(x_pil, -0.25)\n', (4342, 4356), True, 'import flowvision.transforms.functional as F\n'), ((4368, 4383), 'numpy.array', 'np.array', (['y_pil'], {}), '(y_pil)\n', (4376, 4383), True, 'import numpy as np\n'), ((4600, 4622), 'oneflow.manual_seed', 'flow.manual_seed', (['seed'], {}), '(seed)\n', (4616, 4622), True, 'import oneflow as flow\n'), ((5574, 5589), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5587, 5589), False, 'import unittest\n'), ((467, 501), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (476, 501), False, 'import os\n'), ((1233, 1256), 'numpy.allclose', 'np.allclose', (['y_np', 'x_np'], {}), '(y_np, x_np)\n', (1244, 1256), True, 'import numpy as np\n'), ((1483, 1507), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (1494, 1507), True, 'import numpy as np\n'), ((1738, 1762), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (1749, 1762), True, 'import numpy as np\n'), ((2093, 2116), 'numpy.allclose', 'np.allclose', (['y_np', 'x_np'], {}), '(y_np, x_np)\n', (2104, 2116), True, 'import numpy as np\n'), ((2347, 2371), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (2358, 2371), True, 'import numpy as np\n'), ((2594, 2618), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (2605, 2618), True, 'import numpy as np\n'), ((2953, 2976), 'numpy.allclose', 'np.allclose', (['y_np', 'x_np'], {}), '(y_np, x_np)\n', (2964, 2976), True, 'import numpy as np\n'), ((3207, 3231), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (3218, 3231), True, 'import numpy as np\n'), ((3456, 3480), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (3467, 3480), True, 'import numpy as np\n'), ((3754, 3779), 'flowvision.transforms.functional.adjust_hue', 'F.adjust_hue', (['x_pil', '(-0.7)'], {}), '(x_pil, -0.7)\n', (3766, 3779), True, 'import flowvision.transforms.functional as F\n'), ((3788, 3810), 'flowvision.transforms.functional.adjust_hue', 'F.adjust_hue', (['x_pil', '(1)'], {}), '(x_pil, 1)\n', (3800, 3810), True, 'import flowvision.transforms.functional as F\n'), ((4029, 4053), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (4040, 4053), True, 'import numpy as np\n'), ((4278, 4302), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (4289, 4302), True, 'import numpy as np\n'), ((4526, 4550), 'numpy.allclose', 'np.allclose', (['y_np', 'y_ans'], {}), '(y_np, y_ans)\n', (4537, 4550), True, 'import numpy as np\n'), ((4633, 4656), 'flowvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], 
{}), '()\n', (4654, 4656), True, 'import flowvision.transforms as transforms\n'), ((4657, 4677), 'oneflow.rand', 'flow.rand', (['(3)', '(16)', '(18)'], {}), '(3, 16, 18)\n', (4666, 4677), True, 'import oneflow as flow\n'), ((5028, 5041), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5039, 5041), False, 'from collections import OrderedDict\n'), ((1034, 1066), 'numpy.array', 'np.array', (['x_data'], {'dtype': 'np.uint8'}), '(x_data, dtype=np.uint8)\n', (1042, 1066), True, 'import numpy as np\n'), ((1414, 1445), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (1422, 1445), True, 'import numpy as np\n'), ((1669, 1700), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (1677, 1700), True, 'import numpy as np\n'), ((1896, 1928), 'numpy.array', 'np.array', (['x_data'], {'dtype': 'np.uint8'}), '(x_data, dtype=np.uint8)\n', (1904, 1928), True, 'import numpy as np\n'), ((2278, 2309), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (2286, 2309), True, 'import numpy as np\n'), ((2525, 2556), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (2533, 2556), True, 'import numpy as np\n'), ((2754, 2786), 'numpy.array', 'np.array', (['x_data'], {'dtype': 'np.uint8'}), '(x_data, dtype=np.uint8)\n', (2762, 2786), True, 'import numpy as np\n'), ((3138, 3169), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (3146, 3169), True, 'import numpy as np\n'), ((3387, 3418), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (3395, 3418), True, 'import numpy as np\n'), ((3609, 3641), 'numpy.array', 'np.array', (['x_data'], {'dtype': 'np.uint8'}), '(x_data, dtype=np.uint8)\n', (3617, 3641), True, 'import numpy as np\n'), ((3960, 3991), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (3968, 3991), True, 'import numpy as np\n'), ((4209, 4240), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (4217, 4240), True, 'import numpy as np\n'), ((4457, 4488), 'numpy.array', 'np.array', (['y_ans'], {'dtype': 'np.uint8'}), '(y_ans, dtype=np.uint8)\n', (4465, 4488), True, 'import numpy as np\n')]
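The randomness test above depends on oneflow.manual_seed making oneflow.rand reproducible. A self-contained sketch of that contract; the exact values are backend-dependent, so only equality across reseeds is checked (assuming both draws run on the same device and build):

import numpy as np
import oneflow as flow

flow.manual_seed(42)
a = flow.rand(3, 16, 18)

flow.manual_seed(42)
b = flow.rand(3, 16, 18)

# Reseeding with the same value must replay the same random stream.
assert np.array_equal(a.numpy(), b.numpy())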
from logging import log
import os
import math

import oneflow as flow
import logging
import oneflow.nn as nn

from otrans.model.base import BaseModel
from otrans.frontend import BuildFrontEnd
from otrans.encoder import BuildEncoder
from otrans.decoder import BuildDecoder
from otrans.module.loss import LabelSmoothingLoss
from otrans.model.ctc import CTCAssistor

logger = logging.getLogger(__name__)


class SpeechToText(BaseModel):
    def __init__(self, params):
        super(SpeechToText, self).__init__()

        self.frontend = BuildFrontEnd[params["frontend_type"]](**params["frontend"])
        logger.info("Build a %s frontend!" % params["frontend_type"])

        self.encoder = BuildEncoder[params["encoder_type"]](**params["encoder"])
        logger.info("Build a %s encoder!" % params["encoder_type"])

        self.decoder = BuildDecoder[params["decoder_type"]](**params["decoder"])
        logger.info("Build a %s decoder!" % params["decoder_type"])

        self.crit = LabelSmoothingLoss(
            size=params["decoder"]["vocab_size"], smoothing=params["smoothing"]
        )

        self.ctc_weight = params["ctc_weight"]
        if self.ctc_weight > 0.0:
            self.assistor = CTCAssistor(
                hidden_size=params["encoder_output_size"],
                vocab_size=params["decoder"]["vocab_size"],
                lookahead_steps=params["lookahead_steps"]
                if "lookahead_steps" in params
                else 0,
            )
            logger.info("Build a CTC Assistor with weight %.2f" % self.ctc_weight)

    def forward(self, inputs, targets):
        enc_inputs = inputs["inputs"]
        enc_mask = inputs["mask"]
        truth = targets["targets"]
        truth_length = targets["targets_length"]

        enc_inputs, enc_mask = self.frontend(enc_inputs, enc_mask)

        # 1. forward encoder
        memory, memory_mask, _ = self.encoder(enc_inputs, enc_mask)

        # 2. forward decoder
        target_in = truth[:, :-1].clone()
        logits, _ = self.decoder(target_in, memory, memory_mask)

        # 3. compute attention loss
        target_out = truth[:, 1:].clone()
        loss = self.crit(logits, target_out)

        if self.ctc_weight > 0:
            loss_ctc = self.compute_ctc_loss(
                memory, memory_mask, target_out, truth_length
            )
            return (
                (1 - self.ctc_weight) * loss + self.ctc_weight * loss_ctc,
                {"CTCLoss": loss_ctc.item()},
            )
        else:
            return loss, None

    def compute_ctc_loss(self, memory, memory_mask, targets_out, targets_length):
        memory_length = flow.sum(memory_mask.squeeze(1), dim=-1)
        loss_ctc = self.assistor(memory, memory_length, targets_out, targets_length)
        return loss_ctc

    def save_checkpoint(self, params, name):
        flow.save(params, os.path.join(name, "params.tar"))
        flow.save(self.frontend.state_dict(), os.path.join(name, "frontend.pt"))
        flow.save(self.encoder.state_dict(), os.path.join(name, "encoder.pt"))
        flow.save(self.decoder.state_dict(), os.path.join(name, "decoder.pt"))
        if self.ctc_weight > 0.0:
            flow.save(self.assistor.state_dict(), os.path.join(name, "ctc.pt"))

    def load_model(self, chkpt1, chkpt2, chkpt3):
        self.frontend.load_state_dict(flow.load(chkpt1))
        self.encoder.load_state_dict(flow.load(chkpt2))
        self.decoder.load_state_dict(flow.load(chkpt3))

    def set_epoch(self, epoch):
        pass
[ "oneflow.load" ]
[((371, 398), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (388, 398), False, 'import logging\n'), ((984, 1076), 'otrans.module.loss.LabelSmoothingLoss', 'LabelSmoothingLoss', ([], {'size': "params['decoder']['vocab_size']", 'smoothing': "params['smoothing']"}), "(size=params['decoder']['vocab_size'], smoothing=params[\n 'smoothing'])\n", (1002, 1076), False, 'from otrans.module.loss import LabelSmoothingLoss\n'), ((1204, 1393), 'otrans.model.ctc.CTCAssistor', 'CTCAssistor', ([], {'hidden_size': "params['encoder_output_size']", 'vocab_size': "params['decoder']['vocab_size']", 'lookahead_steps': "(params['lookahead_steps'] if 'lookahead_steps' in params else 0)"}), "(hidden_size=params['encoder_output_size'], vocab_size=params[\n 'decoder']['vocab_size'], lookahead_steps=params['lookahead_steps'] if \n 'lookahead_steps' in params else 0)\n", (1215, 1393), False, 'from otrans.model.ctc import CTCAssistor\n'), ((2872, 2904), 'os.path.join', 'os.path.join', (['name', '"""params.tar"""'], {}), "(name, 'params.tar')\n", (2884, 2904), False, 'import os\n'), ((2952, 2985), 'os.path.join', 'os.path.join', (['name', '"""frontend.pt"""'], {}), "(name, 'frontend.pt')\n", (2964, 2985), False, 'import os\n'), ((3032, 3064), 'os.path.join', 'os.path.join', (['name', '"""encoder.pt"""'], {}), "(name, 'encoder.pt')\n", (3044, 3064), False, 'import os\n'), ((3111, 3143), 'os.path.join', 'os.path.join', (['name', '"""decoder.pt"""'], {}), "(name, 'decoder.pt')\n", (3123, 3143), False, 'import os\n'), ((3348, 3365), 'oneflow.load', 'flow.load', (['chkpt1'], {}), '(chkpt1)\n', (3357, 3365), True, 'import oneflow as flow\n'), ((3404, 3421), 'oneflow.load', 'flow.load', (['chkpt2'], {}), '(chkpt2)\n', (3413, 3421), True, 'import oneflow as flow\n'), ((3460, 3477), 'oneflow.load', 'flow.load', (['chkpt3'], {}), '(chkpt3)\n', (3469, 3477), True, 'import oneflow as flow\n'), ((3229, 3257), 'os.path.join', 'os.path.join', (['name', '"""ctc.pt"""'], {}), "(name, 'ctc.pt')\n", (3241, 3257), False, 'import os\n')]
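The load_model/save_checkpoint pair above round-trips sub-module weights through oneflow.save and oneflow.load. A minimal sketch of that round trip under the record's own convention; the stand-in nn.Linear replaces the real encoder, and the temporary checkpoint path is an assumption for illustration:

import os
import tempfile

import oneflow as flow
import oneflow.nn as nn

encoder = nn.Linear(8, 8)  # stand-in for the real encoder module

with tempfile.TemporaryDirectory() as tmp:
    ckpt = os.path.join(tmp, "encoder.pt")
    flow.save(encoder.state_dict(), ckpt)    # mirrors save_checkpoint
    encoder.load_state_dict(flow.load(ckpt))  # mirrors load_model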
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from random import randint from random import choice import numpy as np from oneflow.test_utils.automated_test_util import * import oneflow as flow import oneflow.unittest class TestAffineGrid(flow.unittest.TestCase): def test_affine_grid_2d(test_case): input = flow.tensor(np.arange(1.0, 7).reshape((1, 2, 3)), dtype=flow.float32) output = flow.nn.functional.affine_grid( input, flow.Size([1, 1, 2, 2]), align_corners=True ) groundtruth = np.array([[[[0.0, -3.0], [2.0, 5.0]], [[4.0, 7.0], [6.0, 15.0]]]]) test_case.assertTrue( np.allclose(output.numpy(), groundtruth, rtol=1e-3, atol=1e-4) ) output = flow.nn.functional.affine_grid( input, flow.Size([1, 1, 2, 2]), align_corners=False ) groundtruth = np.array([[[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]]) test_case.assertTrue( np.allclose(output.numpy(), groundtruth, rtol=1e-3, atol=1e-4) ) def test_affine_grid_3d(test_case): input = flow.tensor(np.arange(1.0, 13).reshape((1, 3, 4)), dtype=flow.float32) output = flow.nn.functional.affine_grid( input, flow.Size([1, 1, 2, 2, 2]), align_corners=True ) groundtruth = np.array( [ [ [ [[-2.0, -10.0, -18.0], [0.0, 0.0, 0.0]], [[2.0, 2.0, 2.0], [4.0, 12.0, 20.0]], ], [ [[4.0, 4.0, 4.0], [6.0, 14.0, 22.0]], [[8.0, 16.0, 24.0], [10.0, 26.0, 42.0]], ], ] ] ) test_case.assertTrue( np.allclose(output.numpy(), groundtruth, rtol=1e-3, atol=1e-4) ) output = flow.nn.functional.affine_grid( input, flow.Size([1, 1, 2, 2, 2]), align_corners=False ) groundtruth = np.array( [ [ [ [[1.0, -1.0, -3.0], [2.0, 4.0, 6.0]], [[3.0, 5.0, 7.0], [4.0, 10.0, 16.0]], ], [ [[4.0, 6.0, 8.0], [5.0, 11.0, 17.0]], [[6.0, 12.0, 18.0], [7.0, 17.0, 27.0]], ], ] ] ) test_case.assertTrue( np.allclose(output.numpy(), groundtruth, rtol=1e-3, atol=1e-4) ) @autotest(rtol=1e-03, atol=1e-04, check_allclose=False, check_graph=True) def test_flow_affine_grid_2d_with_random_data(test_case): N = randint(1, 8) C = randint(1, 8) H = randint(1, 8) W = randint(1, 8) device = random_device() align_corners = choice([True, False]) theta = random_tensor(ndim=3, dim0=N, dim1=2, dim2=3).to(device) output = torch.nn.functional.affine_grid( theta, (N, C, H, W), align_corners=align_corners ).to(device) return output @autotest(rtol=1e-03, atol=1e-03, check_allclose=False, check_graph=True) def test_flow_affine_grid_3d_with_random_data(test_case): N = randint(1, 8) C = randint(1, 8) D = randint(1, 8) H = randint(1, 8) W = randint(1, 8) device = random_device() align_corners = choice([True, False]) theta = random_tensor(ndim=3, dim0=N, dim1=3, dim2=4).to(device) output = torch.nn.functional.affine_grid( theta, (N, C, D, H, W), align_corners=align_corners ).to(device) return output if __name__ == "__main__": unittest.main()
[ "oneflow.Size" ]
[((4279, 4294), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4292, 4294), False, 'import unittest\n'), ((1100, 1166), 'numpy.array', 'np.array', (['[[[[0.0, -3.0], [2.0, 5.0]], [[4.0, 7.0], [6.0, 15.0]]]]'], {}), '([[[[0.0, -3.0], [2.0, 5.0]], [[4.0, 7.0], [6.0, 15.0]]]])\n', (1108, 1166), True, 'import numpy as np\n'), ((1428, 1493), 'numpy.array', 'np.array', (['[[[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]]'], {}), '([[[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]])\n', (1436, 1493), True, 'import numpy as np\n'), ((1884, 2067), 'numpy.array', 'np.array', (['[[[[[-2.0, -10.0, -18.0], [0.0, 0.0, 0.0]], [[2.0, 2.0, 2.0], [4.0, 12.0, \n 20.0]]], [[[4.0, 4.0, 4.0], [6.0, 14.0, 22.0]], [[8.0, 16.0, 24.0], [\n 10.0, 26.0, 42.0]]]]]'], {}), '([[[[[-2.0, -10.0, -18.0], [0.0, 0.0, 0.0]], [[2.0, 2.0, 2.0], [4.0,\n 12.0, 20.0]]], [[[4.0, 4.0, 4.0], [6.0, 14.0, 22.0]], [[8.0, 16.0, 24.0\n ], [10.0, 26.0, 42.0]]]]])\n', (1892, 2067), True, 'import numpy as np\n'), ((2576, 2756), 'numpy.array', 'np.array', (['[[[[[1.0, -1.0, -3.0], [2.0, 4.0, 6.0]], [[3.0, 5.0, 7.0], [4.0, 10.0, 16.0\n ]]], [[[4.0, 6.0, 8.0], [5.0, 11.0, 17.0]], [[6.0, 12.0, 18.0], [7.0, \n 17.0, 27.0]]]]]'], {}), '([[[[[1.0, -1.0, -3.0], [2.0, 4.0, 6.0]], [[3.0, 5.0, 7.0], [4.0, \n 10.0, 16.0]]], [[[4.0, 6.0, 8.0], [5.0, 11.0, 17.0]], [[6.0, 12.0, 18.0\n ], [7.0, 17.0, 27.0]]]]])\n', (2584, 2756), True, 'import numpy as np\n'), ((3268, 3281), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3275, 3281), False, 'from random import randint\n'), ((3294, 3307), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3301, 3307), False, 'from random import randint\n'), ((3320, 3333), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3327, 3333), False, 'from random import randint\n'), ((3346, 3359), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3353, 3359), False, 'from random import randint\n'), ((3417, 3438), 'random.choice', 'choice', (['[True, False]'], {}), '([True, False])\n', (3423, 3438), False, 'from random import choice\n'), ((3819, 3832), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3826, 3832), False, 'from random import randint\n'), ((3845, 3858), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3852, 3858), False, 'from random import randint\n'), ((3871, 3884), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3878, 3884), False, 'from random import randint\n'), ((3897, 3910), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3904, 3910), False, 'from random import randint\n'), ((3923, 3936), 'random.randint', 'randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3930, 3936), False, 'from random import randint\n'), ((3994, 4015), 'random.choice', 'choice', (['[True, False]'], {}), '([True, False])\n', (4000, 4015), False, 'from random import choice\n'), ((1024, 1047), 'oneflow.Size', 'flow.Size', (['[1, 1, 2, 2]'], {}), '([1, 1, 2, 2])\n', (1033, 1047), True, 'import oneflow as flow\n'), ((1351, 1374), 'oneflow.Size', 'flow.Size', (['[1, 1, 2, 2]'], {}), '([1, 1, 2, 2])\n', (1360, 1374), True, 'import oneflow as flow\n'), ((1805, 1831), 'oneflow.Size', 'flow.Size', (['[1, 1, 2, 2, 2]'], {}), '([1, 1, 2, 2, 2])\n', (1814, 1831), True, 'import oneflow as flow\n'), ((2496, 2522), 'oneflow.Size', 'flow.Size', (['[1, 1, 2, 2, 2]'], {}), '([1, 1, 2, 2, 2])\n', (2505, 2522), True, 'import oneflow as flow\n'), ((898, 915), 'numpy.arange', 'np.arange', (['(1.0)', '(7)'], {}), '(1.0, 7)\n', (907, 915), 
True, 'import numpy as np\n'), ((1678, 1696), 'numpy.arange', 'np.arange', (['(1.0)', '(13)'], {}), '(1.0, 13)\n', (1687, 1696), True, 'import numpy as np\n')]
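Beyond the pinned groundtruth values, the flow.Size plus affine_grid combination the test relies on is easiest to see with the identity transform, whose 2-D grid is just the normalized pixel coordinates (the batch and spatial sizes here are arbitrary choices):

import oneflow as flow

# Identity affine transform for a single sample.
theta = flow.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]], dtype=flow.float32)

# Sampling grid for an N=1, C=1, H=2, W=2 output.
grid = flow.nn.functional.affine_grid(
    theta, flow.Size([1, 1, 2, 2]), align_corners=True
)

# With align_corners=True the corner pixels map exactly to +/-1:
# [[[-1, -1], [1, -1]], [[-1, 1], [1, 1]]]
print(grid)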
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow import oneflow as flow __all__ = [ "HalfTensor", "FloatTensor", "DoubleTensor", "BoolTensor", "ByteTensor", "CharTensor", "IntTensor", "LongTensor", # TODO: Add support for BFloat16Tensor ] def HalfTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of float16 and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.float16).to("cuda") def FloatTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of float32 and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.float32).to("cuda") def DoubleTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of float64 and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.float64).to("cuda") def BoolTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of bool and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.bool).to("cuda") def ByteTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of uint8 and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.uint8).to("cuda") def CharTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of int8 and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.int8).to("cuda") def IntTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of int32 and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.int32).to("cuda") def LongTensor(*args, **kwargs): r""" Creates a Tensor with the dtype of int64 and it has the same parameters as :func:`oneflow.Tensor`. """ return flow.Tensor(*args, **kwargs).to(flow.int64).to("cuda")
[ "oneflow.Tensor" ]
[((1001, 1029), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (1012, 1029), True, 'import oneflow as flow\n'), ((1227, 1255), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (1238, 1255), True, 'import oneflow as flow\n'), ((1454, 1482), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (1465, 1482), True, 'import oneflow as flow\n'), ((1676, 1704), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (1687, 1704), True, 'import oneflow as flow\n'), ((1896, 1924), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (1907, 1924), True, 'import oneflow as flow\n'), ((2116, 2144), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (2127, 2144), True, 'import oneflow as flow\n'), ((2335, 2363), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (2346, 2363), True, 'import oneflow as flow\n'), ((2556, 2584), 'oneflow.Tensor', 'flow.Tensor', (['*args'], {}), '(*args, **kwargs)\n', (2567, 2584), True, 'import oneflow as flow\n')]
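All eight helpers above follow one pattern: construct with flow.Tensor, cast to the target dtype, then move to CUDA. A CPU-only sketch of the construction-and-cast step, since the helpers hard-code "cuda" (the sample data is arbitrary):

import oneflow as flow

# Same pattern as HalfTensor, minus the final .to("cuda") placement.
x = flow.Tensor([1, 2, 3]).to(flow.float16)
print(x.dtype)  # oneflow.float16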
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np import unittest import oneflow as flow import os def _make_gpt_data_loader_func( data_file_prefix, seq_length, num_samples, batch_size, dtype, shuffle=None, random_seed=None, split_sizes=None, split_index=None, machine_num=1, device_num=1, parallel_distribution=None, start_from_saved_progress=False, ): assert machine_num > 0 assert device_num > 0 and device_num <= 4 parallel_hierachy = None if machine_num == 1: device_strs = "0:0-{}".format(device_num - 1) elif machine_num > 1: device_strs = [ "{}:0-{}".format(machine_id, device_num - 1) for machine_id in range(machine_num) ] parallel_hierachy = (machine_num, device_num) else: raise ValueError("invalid machine_num", machine_num) flow.clear_default_session() flow.config.cpu_device_num(4) flow.config.enable_legacy_model_io(True) func_cfg = flow.FunctionConfig() func_cfg.default_logical_view(flow.scope.consistent_view()) @flow.global_function("predict", function_config=func_cfg) def gpt_loader_fn() -> flow.typing.Numpy: with flow.scope.placement("cpu", device_strs, parallel_hierachy): tokens = flow.data.megatron_gpt_mmap_data_loader( data_file_prefix=data_file_prefix, seq_length=seq_length, num_samples=num_samples, batch_size=batch_size, dtype=dtype, shuffle=shuffle, random_seed=random_seed, split_sizes=split_sizes, split_index=split_index, parallel_distribution=parallel_distribution, start_from_saved_progress=start_from_saved_progress, name="GPTDataLoader", ) if ( isinstance(parallel_distribution, list) and len(parallel_distribution) > 1 ): tokens = flow.hierarchical_parallel_cast( tokens, parallel_distribution=["B", "B"] ) tokens = flow.hierarchical_parallel_cast(tokens, parallel_distribution=["B"]) return tokens check_point = flow.train.CheckPoint() check_point.init() return gpt_loader_fn @unittest.skipIf( os.getenv("ONEFLOW_TEST_GITHUB_HOSTED"), "/dataset not available on GitHub hosted servers", ) class TestGPTDataLoader(flow.unittest.TestCase): DATA_FILE_PREFIX = "/dataset/Megatron-LM/dummy/gpt_sample_dataset_text_document" SEQ_LENGTH = 1024 RANDOM_SEED = 12345 @flow.unittest.skip_unless_1n1d() @unittest.skipIf( flow.unittest.env.eager_execution_enabled(), "2-D SBP doesn't work in eager mode", ) def test_simple(self): of_gpt_data_loader_fn = _make_gpt_data_loader_func( data_file_prefix=self.DATA_FILE_PREFIX, seq_length=10, num_samples=10, batch_size=2, dtype=flow.int64, shuffle=False, start_from_saved_progress=True, ) tokens = of_gpt_data_loader_fn() # this comparison tokens is from megatron-lm gpt data loader cmp_tokens = np.array( [ [40, 1101, 845, 845, 3772, 13, 428, 318, 257, 1492, 13], [13, 612, 318, 257, 18739, 550, 257, 3290, 13, 50256, 464], ], dtype=np.int64, ) self.assertTrue(np.array_equal(tokens, cmp_tokens)) @unittest.skipIf( flow.unittest.env.eager_execution_enabled(), "2-D SBP doesn't work in eager mode", ) def test_1n1d(self): of_gpt_data_loader_fn = _make_gpt_data_loader_func( data_file_prefix=self.DATA_FILE_PREFIX, 
seq_length=self.SEQ_LENGTH, num_samples=648, batch_size=8, split_sizes=[949, 50, 1], split_index=0, dtype=flow.int64, shuffle=True, random_seed=self.RANDOM_SEED, ) tokens_list = [] for _ in range(5): tokens = of_gpt_data_loader_fn() tokens_list.append(tokens) return np.stack(tokens_list, axis=0) @flow.unittest.skip_unless_1n4d() @unittest.skipIf( flow.unittest.env.eager_execution_enabled(), "2-D SBP doesn't work in eager mode", ) def test_1n4d(self): of_gpt_data_loader_fn = _make_gpt_data_loader_func( data_file_prefix=self.DATA_FILE_PREFIX, seq_length=self.SEQ_LENGTH, num_samples=648, batch_size=8, split_sizes=[949, 50, 1], split_index=0, dtype=flow.int64, shuffle=True, random_seed=self.RANDOM_SEED, device_num=4, parallel_distribution=["S(0)"], ) tokens_list = [] for _ in range(5): tokens = of_gpt_data_loader_fn() tokens_list.append(tokens) result_1n4d = np.stack(tokens_list, axis=0) result_1n1d = self.test_1n1d() self.assertTrue(np.array_equal(result_1n4d, result_1n1d)) return result_1n4d @flow.unittest.skip_unless_2n4d() @unittest.skipIf( flow.unittest.env.eager_execution_enabled(), "2-D SBP doesn't work in eager mode", ) def test_2n4d(self): of_gpt_data_loader_fn = _make_gpt_data_loader_func( data_file_prefix=self.DATA_FILE_PREFIX, seq_length=self.SEQ_LENGTH, num_samples=648, batch_size=8, split_sizes=[949, 50, 1], split_index=0, dtype=flow.int64, shuffle=True, random_seed=self.RANDOM_SEED, machine_num=2, device_num=4, parallel_distribution=["S(0)", "B"], ) tokens_list = [] for _ in range(5): tokens = of_gpt_data_loader_fn() tokens_list.append(tokens) result_2n4d = np.stack(tokens_list, axis=0) result_1n1d = self.test_1n1d() self.assertTrue(np.array_equal(result_2n4d, result_1n1d)) return result_2n4d if __name__ == "__main__": unittest.main()
[ "oneflow.FunctionConfig", "oneflow.scope.consistent_view", "oneflow.hierarchical_parallel_cast", "oneflow.config.enable_legacy_model_io", "oneflow.global_function", "oneflow.unittest.skip_unless_1n4d", "oneflow.unittest.skip_unless_2n4d", "oneflow.data.megatron_gpt_mmap_data_loader", "oneflow.train.CheckPoint", "oneflow.scope.placement", "oneflow.unittest.skip_unless_1n1d", "oneflow.unittest.env.eager_execution_enabled", "oneflow.config.cpu_device_num", "oneflow.clear_default_session" ]
[((1445, 1473), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1471, 1473), True, 'import oneflow as flow\n'), ((1478, 1507), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['(4)'], {}), '(4)\n', (1504, 1507), True, 'import oneflow as flow\n'), ((1512, 1552), 'oneflow.config.enable_legacy_model_io', 'flow.config.enable_legacy_model_io', (['(True)'], {}), '(True)\n', (1546, 1552), True, 'import oneflow as flow\n'), ((1569, 1590), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1588, 1590), True, 'import oneflow as flow\n'), ((1661, 1718), 'oneflow.global_function', 'flow.global_function', (['"""predict"""'], {'function_config': 'func_cfg'}), "('predict', function_config=func_cfg)\n", (1681, 1718), True, 'import oneflow as flow\n'), ((2844, 2867), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (2865, 2867), True, 'import oneflow as flow\n'), ((3224, 3256), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3254, 3256), True, 'import oneflow as flow\n'), ((4853, 4885), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (4883, 4885), True, 'import oneflow as flow\n'), ((5816, 5848), 'oneflow.unittest.skip_unless_2n4d', 'flow.unittest.skip_unless_2n4d', ([], {}), '()\n', (5846, 5848), True, 'import oneflow as flow\n'), ((2940, 2979), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_GITHUB_HOSTED"""'], {}), "('ONEFLOW_TEST_GITHUB_HOSTED')\n", (2949, 2979), False, 'import os\n'), ((6838, 6853), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6851, 6853), False, 'import unittest\n'), ((1625, 1653), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1651, 1653), True, 'import oneflow as flow\n'), ((2733, 2801), 'oneflow.hierarchical_parallel_cast', 'flow.hierarchical_parallel_cast', (['tokens'], {'parallel_distribution': "['B']"}), "(tokens, parallel_distribution=['B'])\n", (2764, 2801), True, 'import oneflow as flow\n'), ((3846, 3993), 'numpy.array', 'np.array', (['[[40, 1101, 845, 845, 3772, 13, 428, 318, 257, 1492, 13], [13, 612, 318, \n 257, 18739, 550, 257, 3290, 13, 50256, 464]]'], {'dtype': 'np.int64'}), '([[40, 1101, 845, 845, 3772, 13, 428, 318, 257, 1492, 13], [13, 612,\n 318, 257, 18739, 550, 257, 3290, 13, 50256, 464]], dtype=np.int64)\n', (3854, 3993), True, 'import numpy as np\n'), ((3287, 3330), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3328, 3330), True, 'import oneflow as flow\n'), ((4817, 4846), 'numpy.stack', 'np.stack', (['tokens_list'], {'axis': '(0)'}), '(tokens_list, axis=0)\n', (4825, 4846), True, 'import numpy as np\n'), ((4163, 4206), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (4204, 4206), True, 'import oneflow as flow\n'), ((5648, 5677), 'numpy.stack', 'np.stack', (['tokens_list'], {'axis': '(0)'}), '(tokens_list, axis=0)\n', (5656, 5677), True, 'import numpy as np\n'), ((4916, 4959), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (4957, 4959), True, 'import oneflow as flow\n'), ((6643, 6672), 'numpy.stack', 'np.stack', (['tokens_list'], {'axis': '(0)'}), '(tokens_list, axis=0)\n', (6651, 6672), True, 'import numpy as np\n'), ((5879, 5922), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (5920, 5922), True, 
'import oneflow as flow\n'), ((1778, 1837), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', 'device_strs', 'parallel_hierachy'], {}), "('cpu', device_strs, parallel_hierachy)\n", (1798, 1837), True, 'import oneflow as flow\n'), ((1860, 2253), 'oneflow.data.megatron_gpt_mmap_data_loader', 'flow.data.megatron_gpt_mmap_data_loader', ([], {'data_file_prefix': 'data_file_prefix', 'seq_length': 'seq_length', 'num_samples': 'num_samples', 'batch_size': 'batch_size', 'dtype': 'dtype', 'shuffle': 'shuffle', 'random_seed': 'random_seed', 'split_sizes': 'split_sizes', 'split_index': 'split_index', 'parallel_distribution': 'parallel_distribution', 'start_from_saved_progress': 'start_from_saved_progress', 'name': '"""GPTDataLoader"""'}), "(data_file_prefix=data_file_prefix,\n seq_length=seq_length, num_samples=num_samples, batch_size=batch_size,\n dtype=dtype, shuffle=shuffle, random_seed=random_seed, split_sizes=\n split_sizes, split_index=split_index, parallel_distribution=\n parallel_distribution, start_from_saved_progress=\n start_from_saved_progress, name='GPTDataLoader')\n", (1899, 2253), True, 'import oneflow as flow\n'), ((4096, 4130), 'numpy.array_equal', 'np.array_equal', (['tokens', 'cmp_tokens'], {}), '(tokens, cmp_tokens)\n', (4110, 4130), True, 'import numpy as np\n'), ((5741, 5781), 'numpy.array_equal', 'np.array_equal', (['result_1n4d', 'result_1n1d'], {}), '(result_1n4d, result_1n1d)\n', (5755, 5781), True, 'import numpy as np\n'), ((6736, 6776), 'numpy.array_equal', 'np.array_equal', (['result_2n4d', 'result_1n1d'], {}), '(result_2n4d, result_1n1d)\n', (6750, 6776), True, 'import numpy as np\n'), ((2603, 2676), 'oneflow.hierarchical_parallel_cast', 'flow.hierarchical_parallel_cast', (['tokens'], {'parallel_distribution': "['B', 'B']"}), "(tokens, parallel_distribution=['B', 'B'])\n", (2634, 2676), True, 'import oneflow as flow\n')]
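The loader's split_sizes=[949, 50, 1] / split_index=0 arguments follow the Megatron-LM convention of weighting a train/valid/test split and then selecting one part. A plain-Python sketch of how such weights could resolve to contiguous document ranges; the rounding scheme here is an assumption for illustration, not the loader's exact implementation:

def split_ranges(num_docs, split_sizes):
    """Turn relative split weights into contiguous [start, stop) index ranges."""
    total = sum(split_sizes)
    bounds = [0]
    for w in split_sizes:
        bounds.append(bounds[-1] + int(round(num_docs * w / total)))
    bounds[-1] = num_docs  # absorb rounding drift in the last split
    return [(bounds[i], bounds[i + 1]) for i in range(len(split_sizes))]

# 10,000 documents weighted 949/50/1 -> train/valid/test ranges.
print(split_ranges(10000, [949, 50, 1]))
# [(0, 9490), (9490, 9990), (9990, 10000)]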
#!/usr/bin/python3
import time
from functools import partial
from typing import Dict

import numpy as np

import oneflow as flow
from oneflow import nn

import sys

sys.path.append(".")

from modeling import BertForPreTraining
from utils.ofrecord_data_utils import OfRecordDataLoader
from utils.lr_scheduler import PolynomialLR
from utils.optimizer import build_optimizer
from utils.metric import Metric
from utils.comm import ttol, tton
from utils.checkpoint import save_model

import config
from config import str2bool


def get_config():
    parser = config.get_parser()

    # pretrain bert config
    parser.add_argument(
        "--ofrecord_path",
        type=str,
        default="/dataset/bert/of_wiki_seq_len_128",
        help="Path to ofrecord dataset",
    )
    parser.add_argument(
        "--train-dataset-size",
        type=int,
        default=10000000,
        help="dataset size of ofrecord",
    )
    parser.add_argument(
        "--train-data-part", type=int, default=64, help="data part num of ofrecord"
    )
    parser.add_argument(
        "--train-batch-size", type=int, default=8, help="Training batch size"
    )
    parser.add_argument(
        "--val-batch-size", type=int, default=32, help="Validation batch size"
    )
    parser.add_argument(
        "--train-global-batch-size",
        type=int,
        default=None,
        dest="train_global_batch_size",
        help="train batch size",
    )
    parser.add_argument(
        "--val-global-batch-size",
        type=int,
        default=None,
        dest="val_global_batch_size",
        help="val batch size",
    )
    parser.add_argument("-e", "--epochs", type=int, default=1, help="Number of epochs")
    parser.add_argument(
        "--with-cuda",
        type=bool,
        default=True,
        help="Training with CUDA: true, or false",
    )
    parser.add_argument(
        "--cuda_devices", type=int, nargs="+", default=None, help="CUDA device ids"
    )
    parser.add_argument(
        "--optim_name", type=str, default="adamw", help="optimizer name"
    )
    parser.add_argument("--lr", type=float, default=1e-3, help="Learning rate of adam")
    parser.add_argument(
        "--weight_decay", type=float, default=0.01, help="Weight_decay of adam"
    )
    parser.add_argument(
        "--loss_print_every_n_iters",
        type=int,
        default=20,
        help="Interval of training loss printing",
    )
    parser.add_argument(
        "--val_print_every_n_iters",
        type=int,
        default=20,
        help="Interval of evaluation printing",
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default="checkpoints",
        help="Path to model saving",
    )
    parser.add_argument(
        "--grad-acc-steps", type=int, default=1, help="Steps for gradient accumulation"
    )
    parser.add_argument(
        "--nccl-fusion-threshold-mb",
        type=int,
        default=16,
        dest="nccl_fusion_threshold_mb",
        help="NCCL fusion threshold in megabytes; set to 0 to be compatible with previous versions of OneFlow.",
    )
    parser.add_argument(
        "--nccl-fusion-max-ops",
        type=int,
        default=24,
        dest="nccl_fusion_max_ops",
        help="Maximum number of ops in an NCCL fusion; set to 0 to be compatible with previous versions of OneFlow.",
    )
    parser.add_argument(
        "--use_ddp",
        type=str2bool,
        nargs="?",
        const=True,
        help="Whether to use DDP",
    )
    parser.add_argument(
        "--use_consistent",
        type=str2bool,
        nargs="?",
        const=True,
        help="Whether to use consistent mode",
    )
    parser.add_argument(
        "--metric-local",
        type=str2bool,
        default=False,
        nargs="?",
        const=True,
        dest="metric_local",
    )

    args = parser.parse_args()
    return args


def pretrain(graph: nn.Graph, metric_local: bool) -> Dict:
    # NOTE(xyliao): with gradient accumulation, one graph call consumes one
    # mini-batch (i.e. n micro-batches).
    next_sent_output, next_sent_labels, loss, mlm_loss, nsp_loss = graph()

    # to local
    next_sent_output = ttol(next_sent_output, metric_local)
    next_sent_labels = ttol(next_sent_labels, metric_local)

    # next sentence prediction accuracy
    correct = (
        next_sent_output.argmax(dim=1)
        .to(dtype=next_sent_labels.dtype)
        .eq(next_sent_labels.squeeze(1))
        .to(dtype=flow.float32)
        .sum()
        .numpy()
        .item()
    )
    pred_acc = np.array(correct / next_sent_labels.nelement())

    return {
        "total_loss": tton(loss.mean(), metric_local),
        "mlm_loss": tton(mlm_loss.mean(), metric_local),
        "nsp_loss": tton(nsp_loss.mean(), metric_local),
        "pred_acc": pred_acc,
    }


def validation(
    epoch: int,
    iter_per_epoch: int,
    graph: nn.Graph,
    print_interval: int,
    metric_local: bool,
) -> float:
    total_correct = 0
    total_element = 0
    for i in range(iter_per_epoch):
        start_t = time.time()

        next_sent_output, next_sent_labels = graph()

        next_sent_output = tton(next_sent_output, metric_local)
        next_sent_labels = tton(next_sent_labels, metric_local)

        end_t = time.time()

        # next sentence prediction accuracy
        correct = (
            next_sent_output.argmax(axis=-1) == next_sent_labels.squeeze(1)
        ).sum()
        total_correct += correct
        total_element += next_sent_labels.size

        if (i + 1) % print_interval == 0 and flow.env.get_rank() == 0:
            print(
                "Epoch {}, val iter {}, val time: {:.3f}s".format(
                    epoch, (i + 1), end_t - start_t
                )
            )

    if flow.env.get_rank() == 0:
        print(
            "Epoch {}, val iter {}, total accuracy {:.2f}".format(
                epoch, (i + 1), total_correct * 100.0 / total_element
            )
        )
    return total_correct / total_element


def main():
    args = get_config()

    world_size = flow.env.get_world_size()
    if args.train_global_batch_size is None:
        args.train_global_batch_size = args.train_batch_size * world_size
    else:
        assert args.train_global_batch_size % args.train_batch_size == 0

    if args.val_global_batch_size is None:
        args.val_global_batch_size = args.val_batch_size * world_size
    else:
        assert args.val_global_batch_size % args.val_batch_size == 0

    flow.boxing.nccl.set_fusion_threshold_mbytes(args.nccl_fusion_threshold_mb)
    flow.boxing.nccl.set_fusion_max_ops_num(args.nccl_fusion_max_ops)

    if args.with_cuda:
        device = "cuda"
    else:
        device = "cpu"
    print("Device is: ", device)

    print("Creating Dataloader")
    train_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="train",
        dataset_size=args.train_dataset_size,
        batch_size=args.train_global_batch_size,
        data_part_num=args.train_data_part,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
        consistent=args.use_consistent,
    )

    test_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="test",
        dataset_size=1024,
        batch_size=args.val_global_batch_size,
        data_part_num=4,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
        consistent=args.use_consistent,
    )

    print("Building BERT Model")
    hidden_size = 64 * args.num_attention_heads
    intermediate_size = 4 * hidden_size
    bert_model = BertForPreTraining(
        args.vocab_size,
        args.seq_length,
        hidden_size,
        args.num_hidden_layers,
        args.num_attention_heads,
        intermediate_size,
        nn.GELU(),
        args.hidden_dropout_prob,
        args.attention_probs_dropout_prob,
        args.max_position_embeddings,
        args.type_vocab_size,
    )

    # Load the same initial parameters with lazy model.
    # from utils.compare_lazy_outputs import load_params_from_lazy
    # load_params_from_lazy(
    #     bert_model.state_dict(),
    #     "../../OneFlow-Benchmark/LanguageModeling/BERT/initial_model",
    # )

    # the decoder weight must be tied to the word embedding
    assert id(bert_model.cls.predictions.decoder.weight) == id(
        bert_model.bert.embeddings.word_embeddings.weight
    )

    ns_criterion = nn.CrossEntropyLoss(reduction="mean")
    mlm_criterion = nn.CrossEntropyLoss(reduction="none")

    if args.use_consistent:
        placement = flow.placement("cuda", {0: range(flow.env.get_world_size())})
        bert_model = bert_model.to_consistent(
            placement=placement, sbp=flow.sbp.broadcast
        )
    else:
        bert_model.to(device)
        ns_criterion.to(device)
        mlm_criterion.to(device)

    optimizer = build_optimizer(
        args.optim_name,
        bert_model,
        args.lr,
        args.weight_decay,
        weight_decay_excludes=["bias", "LayerNorm", "layer_norm"],
        clip_grad_max_norm=1,
        clip_grad_norm_type=2.0,
    )

    steps = args.epochs * len(train_data_loader)
    warmup_steps = int(steps * args.warmup_proportion)

    lr_scheduler = PolynomialLR(optimizer, steps=steps, end_learning_rate=0.0)

    lr_scheduler = flow.optim.lr_scheduler.WarmUpLR(
        lr_scheduler, warmup_factor=0, warmup_iters=warmup_steps, warmup_method="linear"
    )

    def get_masked_lm_loss(
        logit,
        masked_lm_positions,
        masked_lm_labels,
        label_weights,
        max_predictions_per_seq,
    ):
        # gather valid position indices
        logit = flow.gather(
            logit,
            index=masked_lm_positions.unsqueeze(2).expand(-1, -1, args.vocab_size),
            dim=1,
        )

        logit = flow.reshape(logit, [-1, args.vocab_size])
        label_id = flow.reshape(masked_lm_labels, [-1])

        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        pre_example_loss = mlm_criterion(logit, label_id)
        pre_example_loss = flow.reshape(pre_example_loss, [-1, max_predictions_per_seq])
        numerator = flow.sum(pre_example_loss * label_weights)
        denominator = flow.sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss

    class BertGraph(nn.Graph):
        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self.ns_criterion = ns_criterion
            self.masked_lm_criterion = partial(
                get_masked_lm_loss, max_predictions_per_seq=args.max_predictions_per_seq
            )
            self.add_optimizer(optimizer, lr_sch=lr_scheduler)
            self._train_data_loader = train_data_loader
            if args.grad_acc_steps > 1:
                self.config.set_gradient_accumulation_steps(args.grad_acc_steps)
            if args.use_fp16:
                self.config.enable_amp(True)
                grad_scaler = flow.amp.GradScaler(
                    init_scale=2 ** 30,
                    growth_factor=2.0,
                    backoff_factor=0.5,
                    growth_interval=2000,
                )
                self.set_grad_scaler(grad_scaler)
            self.config.allow_fuse_add_to_output(True)
            self.config.allow_fuse_model_update_ops(True)

        def build(self):
            (
                input_ids,
                next_sentence_labels,
                input_mask,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._train_data_loader()
            input_ids = input_ids.to(device=device)
            input_mask = input_mask.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sentence_labels = next_sentence_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device=device)
            masked_lm_weights = masked_lm_weights.to(device=device)

            # 1. forward the next_sentence_prediction and masked_lm model
            prediction_scores, seq_relationship_scores = self.bert(
                input_ids, segment_ids, input_mask
            )

            # 2-1. loss of is_next classification result
            next_sentence_loss = self.ns_criterion(
                seq_relationship_scores.reshape(-1, 2), next_sentence_labels.reshape(-1)
            )

            masked_lm_loss = self.masked_lm_criterion(
                prediction_scores, masked_lm_positions, masked_lm_ids, masked_lm_weights
            )

            total_loss = masked_lm_loss + next_sentence_loss

            total_loss.backward()
            return (
                seq_relationship_scores,
                next_sentence_labels,
                total_loss,
                masked_lm_loss,
                next_sentence_loss,
            )

    bert_graph = BertGraph()

    class BertEvalGraph(nn.Graph):
        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self._test_data_loader = test_data_loader
            self.config.allow_fuse_add_to_output(True)

        def build(self):
            (
                input_ids,
                next_sent_labels,
                input_masks,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._test_data_loader()
            input_ids = input_ids.to(device=device)
            input_masks = input_masks.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sent_labels = next_sent_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device)

            with flow.no_grad():
                # 1. forward the next_sentence_prediction and masked_lm model,
                # keeping the (input_ids, segment_ids, input_masks) argument
                # order consistent with the training graph above
                _, seq_relationship_scores = self.bert(
                    input_ids, segment_ids, input_masks
                )

            return seq_relationship_scores, next_sent_labels

    bert_eval_graph = BertEvalGraph()

    train_total_losses = []
    for epoch in range(args.epochs):
        metric = Metric(
            desc="bert pretrain",
            print_steps=args.loss_print_every_n_iters,
            batch_size=args.train_global_batch_size * args.grad_acc_steps,
            keys=["total_loss", "mlm_loss", "nsp_loss", "pred_acc"],
        )

        # Train
        bert_model.train()

        for step in range(len(train_data_loader)):
            bert_outputs = pretrain(bert_graph, args.metric_local)

            if flow.env.get_rank() == 0:
                metric.metric_cb(step, epoch=epoch)(bert_outputs)

            train_total_losses.append(bert_outputs["total_loss"])

        # Eval
        bert_model.eval()
        val_acc = validation(
            epoch,
            len(test_data_loader),
            bert_eval_graph,
            args.val_print_every_n_iters,
            args.metric_local,
        )

        save_model(bert_model, args.checkpoint_path, epoch, val_acc, args.use_consistent)


if __name__ == "__main__":
    main()
[ "oneflow.optim.lr_scheduler.WarmUpLR", "oneflow.sum", "oneflow.boxing.nccl.set_fusion_max_ops_num", "oneflow.amp.GradScaler", "oneflow.env.get_rank", "oneflow.nn.CrossEntropyLoss", "oneflow.reshape", "oneflow.no_grad", "oneflow.env.get_world_size", "oneflow.nn.GELU", "oneflow.boxing.nccl.set_fusion_threshold_mbytes" ]
[((165, 185), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (180, 185), False, 'import sys\n'), ((553, 572), 'config.get_parser', 'config.get_parser', ([], {}), '()\n', (570, 572), False, 'import config\n'), ((4163, 4199), 'utils.comm.ttol', 'ttol', (['next_sent_output', 'metric_local'], {}), '(next_sent_output, metric_local)\n', (4167, 4199), False, 'from utils.comm import ttol, tton\n'), ((4223, 4259), 'utils.comm.ttol', 'ttol', (['next_sent_labels', 'metric_local'], {}), '(next_sent_labels, metric_local)\n', (4227, 4259), False, 'from utils.comm import ttol, tton\n'), ((6056, 6081), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (6079, 6081), True, 'import oneflow as flow\n'), ((6482, 6557), 'oneflow.boxing.nccl.set_fusion_threshold_mbytes', 'flow.boxing.nccl.set_fusion_threshold_mbytes', (['args.nccl_fusion_threshold_mb'], {}), '(args.nccl_fusion_threshold_mb)\n', (6526, 6557), True, 'import oneflow as flow\n'), ((6562, 6627), 'oneflow.boxing.nccl.set_fusion_max_ops_num', 'flow.boxing.nccl.set_fusion_max_ops_num', (['args.nccl_fusion_max_ops'], {}), '(args.nccl_fusion_max_ops)\n', (6601, 6627), True, 'import oneflow as flow\n'), ((6801, 7113), 'utils.ofrecord_data_utils.OfRecordDataLoader', 'OfRecordDataLoader', ([], {'ofrecord_dir': 'args.ofrecord_path', 'mode': '"""train"""', 'dataset_size': 'args.train_dataset_size', 'batch_size': 'args.train_global_batch_size', 'data_part_num': 'args.train_data_part', 'seq_length': 'args.seq_length', 'max_predictions_per_seq': 'args.max_predictions_per_seq', 'consistent': 'args.use_consistent'}), "(ofrecord_dir=args.ofrecord_path, mode='train',\n dataset_size=args.train_dataset_size, batch_size=args.\n train_global_batch_size, data_part_num=args.train_data_part, seq_length\n =args.seq_length, max_predictions_per_seq=args.max_predictions_per_seq,\n consistent=args.use_consistent)\n", (6819, 7113), False, 'from utils.ofrecord_data_utils import OfRecordDataLoader\n'), ((7191, 7458), 'utils.ofrecord_data_utils.OfRecordDataLoader', 'OfRecordDataLoader', ([], {'ofrecord_dir': 'args.ofrecord_path', 'mode': '"""test"""', 'dataset_size': '(1024)', 'batch_size': 'args.val_global_batch_size', 'data_part_num': '(4)', 'seq_length': 'args.seq_length', 'max_predictions_per_seq': 'args.max_predictions_per_seq', 'consistent': 'args.use_consistent'}), "(ofrecord_dir=args.ofrecord_path, mode='test',\n dataset_size=1024, batch_size=args.val_global_batch_size, data_part_num\n =4, seq_length=args.seq_length, max_predictions_per_seq=args.\n max_predictions_per_seq, consistent=args.use_consistent)\n", (7209, 7458), False, 'from utils.ofrecord_data_utils import OfRecordDataLoader\n'), ((8427, 8464), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (8446, 8464), False, 'from oneflow import nn\n'), ((8485, 8522), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (8504, 8522), False, 'from oneflow import nn\n'), ((8869, 9055), 'utils.optimizer.build_optimizer', 'build_optimizer', (['args.optim_name', 'bert_model', 'args.lr', 'args.weight_decay'], {'weight_decay_excludes': "['bias', 'LayerNorm', 'layer_norm']", 'clip_grad_max_norm': '(1)', 'clip_grad_norm_type': '(2.0)'}), "(args.optim_name, bert_model, args.lr, args.weight_decay,\n weight_decay_excludes=['bias', 'LayerNorm', 'layer_norm'],\n clip_grad_max_norm=1, clip_grad_norm_type=2.0)\n", (8884, 9055), False, 'from utils.optimizer import 
build_optimizer\n'), ((9236, 9295), 'utils.lr_scheduler.PolynomialLR', 'PolynomialLR', (['optimizer'], {'steps': 'steps', 'end_learning_rate': '(0.0)'}), '(optimizer, steps=steps, end_learning_rate=0.0)\n', (9248, 9295), False, 'from utils.lr_scheduler import PolynomialLR\n'), ((9316, 9434), 'oneflow.optim.lr_scheduler.WarmUpLR', 'flow.optim.lr_scheduler.WarmUpLR', (['lr_scheduler'], {'warmup_factor': '(0)', 'warmup_iters': 'warmup_steps', 'warmup_method': '"""linear"""'}), "(lr_scheduler, warmup_factor=0,\n warmup_iters=warmup_steps, warmup_method='linear')\n", (9348, 9434), True, 'import oneflow as flow\n'), ((15326, 15412), 'utils.checkpoint.save_model', 'save_model', (['bert_model', 'args.checkpoint_path', 'epoch', 'val_acc', 'args.use_consistent'], {}), '(bert_model, args.checkpoint_path, epoch, val_acc, args.\n use_consistent)\n', (15336, 15412), False, 'from utils.checkpoint import save_model\n'), ((5047, 5058), 'time.time', 'time.time', ([], {}), '()\n', (5056, 5058), False, 'import time\n'), ((5141, 5177), 'utils.comm.tton', 'tton', (['next_sent_output', 'metric_local'], {}), '(next_sent_output, metric_local)\n', (5145, 5177), False, 'from utils.comm import ttol, tton\n'), ((5205, 5241), 'utils.comm.tton', 'tton', (['next_sent_labels', 'metric_local'], {}), '(next_sent_labels, metric_local)\n', (5209, 5241), False, 'from utils.comm import ttol, tton\n'), ((5258, 5269), 'time.time', 'time.time', ([], {}), '()\n', (5267, 5269), False, 'import time\n'), ((5757, 5776), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (5774, 5776), True, 'import oneflow as flow\n'), ((7847, 7856), 'oneflow.nn.GELU', 'nn.GELU', ([], {}), '()\n', (7854, 7856), False, 'from oneflow import nn\n'), ((9826, 9868), 'oneflow.reshape', 'flow.reshape', (['logit', '[-1, args.vocab_size]'], {}), '(logit, [-1, args.vocab_size])\n', (9838, 9868), True, 'import oneflow as flow\n'), ((9888, 9924), 'oneflow.reshape', 'flow.reshape', (['masked_lm_labels', '[-1]'], {}), '(masked_lm_labels, [-1])\n', (9900, 9924), True, 'import oneflow as flow\n'), ((10278, 10339), 'oneflow.reshape', 'flow.reshape', (['pre_example_loss', '[-1, max_predictions_per_seq]'], {}), '(pre_example_loss, [-1, max_predictions_per_seq])\n', (10290, 10339), True, 'import oneflow as flow\n'), ((10360, 10402), 'oneflow.sum', 'flow.sum', (['(pre_example_loss * label_weights)'], {}), '(pre_example_loss * label_weights)\n', (10368, 10402), True, 'import oneflow as flow\n'), ((14530, 14730), 'utils.metric.Metric', 'Metric', ([], {'desc': '"""bert pretrain"""', 'print_steps': 'args.loss_print_every_n_iters', 'batch_size': '(args.train_global_batch_size * args.grad_acc_steps)', 'keys': "['total_loss', 'mlm_loss', 'nsp_loss', 'pred_acc']"}), "(desc='bert pretrain', print_steps=args.loss_print_every_n_iters,\n batch_size=args.train_global_batch_size * args.grad_acc_steps, keys=[\n 'total_loss', 'mlm_loss', 'nsp_loss', 'pred_acc'])\n", (14536, 14730), False, 'from utils.metric import Metric\n'), ((10425, 10448), 'oneflow.sum', 'flow.sum', (['label_weights'], {}), '(label_weights)\n', (10433, 10448), True, 'import oneflow as flow\n'), ((10725, 10811), 'functools.partial', 'partial', (['get_masked_lm_loss'], {'max_predictions_per_seq': 'args.max_predictions_per_seq'}), '(get_masked_lm_loss, max_predictions_per_seq=args.\n max_predictions_per_seq)\n', (10732, 10811), False, 'from functools import partial\n'), ((5553, 5572), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (5570, 5572), True, 'import oneflow as flow\n'), ((11182, 11287), 
'oneflow.amp.GradScaler', 'flow.amp.GradScaler', ([], {'init_scale': '(2 ** 30)', 'growth_factor': '(2.0)', 'backoff_factor': '(0.5)', 'growth_interval': '(2000)'}), '(init_scale=2 ** 30, growth_factor=2.0, backoff_factor=\n 0.5, growth_interval=2000)\n', (11201, 11287), True, 'import oneflow as flow\n'), ((14121, 14135), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (14133, 14135), True, 'import oneflow as flow\n'), ((14960, 14979), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (14977, 14979), True, 'import oneflow as flow\n'), ((8605, 8630), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (8628, 8630), True, 'import oneflow as flow\n')]
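The padded-slot averaging inside `get_masked_lm_loss` above is easy to verify by hand. A minimal numpy sketch with made-up numbers (not part of the training script itself):

import numpy as np

# hypothetical per-slot cross-entropy losses for one sequence with
# max_predictions_per_seq = 4, of which only the first two slots are real
per_example_loss = np.array([[2.0, 1.5, 0.7, 0.3]])
label_weights = np.array([[1.0, 1.0, 0.0, 0.0]])  # 1.0 = real prediction, 0.0 = padding

numerator = np.sum(per_example_loss * label_weights)  # 3.5, padded slots contribute nothing
denominator = np.sum(label_weights) + 1e-5         # epsilon guards against all-padding rows
print(numerator / denominator)                   # ~1.75, the mean over real slots only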
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import cv2 import numpy as np import typing as tp import oneflow as flow import oneflow.typing as otp import image_test_util def _make_image_resize_to_fixed_func( target_size, image_static_shape, dtype, origin_dtype=flow.float32, channels=3, interpolation_type="bilinear", func_cfg=None, print_debug_info=False, ): @flow.global_function(type="predict", function_config=func_cfg) def image_resize_to_fixed( image_list: otp.ListListNumpy.Placeholder( shape=image_static_shape, dtype=origin_dtype ) ) -> tp.Tuple[otp.ListNumpy, otp.ListNumpy]: image_buffer = flow.tensor_list_to_tensor_buffer(image_list) res_image, scale, _ = flow.image.resize( image_buffer, target_size=target_size, keep_aspect_ratio=False, channels=channels, dtype=dtype, interpolation_type=interpolation_type, ) return res_image, scale return image_resize_to_fixed def _make_image_resize_keep_aspect_ratio_func( target_size, min_size, max_size, image_static_shape, aspect_ratio_list, dtype, channels=3, resize_side="shorter", interpolation_type="bilinear", func_cfg=None, print_debug_info=False, ): @flow.global_function(type="predict", function_config=func_cfg) def image_resize_keep_aspect_ratio( image_list: otp.ListListNumpy.Placeholder( shape=image_static_shape, dtype=dtype ), ) -> tp.Tuple[otp.ListListNumpy, otp.ListNumpy, otp.ListNumpy]: image_buffer = flow.tensor_list_to_tensor_buffer(image_list) res_image, scale, new_size = flow.image.resize( image_buffer, target_size=target_size, min_size=min_size, max_size=max_size, keep_aspect_ratio=True, resize_side=resize_side, interpolation_type=interpolation_type, ) out_shape = image_test_util.infer_keep_aspect_ratio_resized_images_static_shape( target_size=target_size, min_size=min_size, max_size=max_size, aspect_ratio_list=aspect_ratio_list, resize_side=resize_side, channels=channels, ) if print_debug_info: print("resized image_static_shape: {}".format(out_shape)) res_image = flow.tensor_buffer_to_tensor_list( res_image, shape=out_shape, dtype=dtype, ) return res_image, scale, new_size return image_resize_keep_aspect_ratio def _of_image_resize( image_list, dtype=flow.float32, origin_dtype=None, channels=3, keep_aspect_ratio=False, target_size=None, min_size=None, max_size=None, resize_side="shorter", interpolation_type="bilinear", print_debug_info=False, ): assert isinstance(image_list, (list, tuple)) assert all(isinstance(image, np.ndarray) for image in image_list) assert all(image.ndim == 3 for image in image_list) assert all(image.shape[2] == channels for image in image_list) image_static_shape, aspect_ratio_list = image_test_util.infer_images_static_shape( image_list, channels ) if print_debug_info: print("image_static_shape: {}".format(image_static_shape)) print("aspect_ratio_list: {}".format(aspect_ratio_list)) flow.clear_default_session() func_cfg = flow.FunctionConfig() func_cfg.default_logical_view(flow.scope.mirrored_view()) image_list = [np.expand_dims(image, axis=0) for image in image_list] 
if keep_aspect_ratio: image_resize_func = _make_image_resize_keep_aspect_ratio_func( target_size=target_size, min_size=min_size, max_size=max_size, image_static_shape=image_static_shape, aspect_ratio_list=aspect_ratio_list, dtype=dtype, channels=channels, resize_side=resize_side, interpolation_type=interpolation_type, func_cfg=func_cfg, print_debug_info=print_debug_info, ) res_image, scale, new_size = image_resize_func([image_list]) return (res_image[0], scale[0], new_size[0]) else: if origin_dtype is None: origin_dtype = dtype image_resize_func = _make_image_resize_to_fixed_func( target_size=target_size, image_static_shape=image_static_shape, dtype=dtype, origin_dtype=origin_dtype, channels=channels, interpolation_type=interpolation_type, func_cfg=func_cfg, print_debug_info=print_debug_info, ) res_image, scale = image_resize_func([image_list]) new_size = np.asarray([(target_size, target_size)] * len(image_list)) return (res_image[0], scale[0], new_size) def _get_resize_size_and_scale( w, h, target_size, min_size=None, max_size=None, keep_aspect_ratio=True, resize_side="shorter", ): if keep_aspect_ratio: assert isinstance(target_size, int) aspect_ratio = float(min((w, h))) / float(max((w, h))) ( min_res_size, max_res_size, ) = image_test_util.compute_keep_aspect_ratio_resized_size( target_size, min_size, max_size, aspect_ratio, resize_side ) if w < h: res_w = min_res_size res_h = max_res_size else: res_w = max_res_size res_h = min_res_size else: assert isinstance(target_size, (list, tuple)) assert len(target_size) == 2 assert all(isinstance(size, int) for size in target_size) res_w, res_h = target_size scale_w = res_w / w scale_h = res_h / h return (res_w, res_h), (scale_w, scale_h) def _cv_image_resize( image_list, target_size, keep_aspect_ratio=True, min_size=None, max_size=None, resize_side="shorter", interpolation=cv2.INTER_LINEAR, dtype=np.float32, ): res_image_list = [] res_size_list = [] res_scale_list = [] for image in image_list: h, w = image.shape[:2] new_size, scale = _get_resize_size_and_scale( w, h, target_size, min_size, max_size, keep_aspect_ratio, resize_side ) res_image_list.append( cv2.resize(image.squeeze(), new_size, interpolation=interpolation).astype( dtype ) ) res_size_list.append(new_size) res_scale_list.append(scale) return res_image_list, res_scale_list, res_size_list def _test_image_resize_with_cv( test_case, image_files, target_size, min_size=None, max_size=None, keep_aspect_ratio=True, resize_side="shorter", dtype=flow.float32, origin_dtype=None, print_debug_info=False, ): if origin_dtype is None: origin_dtype = dtype image_list = image_test_util.read_images_by_cv(image_files, origin_dtype) if print_debug_info: print("origin images shapes: {}".format([image.shape for image in image_list])) print( "target_size: {}, min_size: {}, max_size: {}, keep_aspect_ratio: {}, \n" "resize_side: {}, dtype: {}, origin_dtype: {}".format( target_size, min_size, max_size, keep_aspect_ratio, resize_side, dtype, origin_dtype, ) ) of_res_images, of_scales, of_new_sizes = _of_image_resize( image_list=image_list, dtype=dtype, origin_dtype=origin_dtype, keep_aspect_ratio=keep_aspect_ratio, target_size=target_size, min_size=min_size, max_size=max_size, resize_side=resize_side, print_debug_info=print_debug_info, ) cv_res_images, cv_scales, cv_new_sizes = _cv_image_resize( image_list=image_list, target_size=target_size, keep_aspect_ratio=keep_aspect_ratio, min_size=min_size, max_size=max_size, resize_side=resize_side, dtype=flow.convert_oneflow_dtype_to_numpy_dtype(dtype), ) if 
print_debug_info: print("comparing resized image between of and cv") for i, (of_image, cv_image) in enumerate(zip(of_res_images, cv_res_images)): print(" origin image shape: {}".format(image_list[i].shape)) print( " resized image shape: {} vs. {}".format( of_image.shape, cv_image.shape ) ) # print(" of_res_image:\n{}".format(of_res_image)) # print(" cv_res_image:\n{}".format(cv_res_image)) print("comparing resized image scale between of and cv") for of_scale, cv_scale in zip(of_scales, cv_scales): print(" scale: {} vs. {}:".format(of_scale, cv_scale)) print("comparing resized image new size between of and cv") for of_new_size, cv_new_size in zip(of_new_sizes, cv_new_sizes): print(" new_size: {} vs. {}:".format(of_new_size, cv_new_size)) for ( of_res_image, cv_res_image, of_scale, cv_scale, of_new_size, cv_new_size, ) in zip( of_res_images, cv_res_images, of_scales, cv_scales, of_new_sizes, cv_new_sizes, ): test_case.assertTrue(np.allclose(of_res_image, cv_res_image)) test_case.assertTrue(np.allclose(of_scale, cv_scale)) test_case.assertTrue(np.allclose(of_new_size, cv_new_size)) # @flow.unittest.skip_unless_1n1d() # TODO(zhangwenxiao, jiangxuefei): refine in multi-client @unittest.skipIf(True, "skip for now because of single-client tensor_list removed") class TestImageResize(flow.unittest.TestCase): def test_image_resize_to_fixed_size(test_case): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=(224, 224), keep_aspect_ratio=False, # print_debug_info=True, ) def test_image_resize_shorter_to_target_size(test_case): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=800, keep_aspect_ratio=True, resize_side="shorter", # print_debug_info=True, ) def test_image_resize_longer_to_target_size(test_case): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=1000, keep_aspect_ratio=True, resize_side="longer", # print_debug_info=True, ) def test_image_resize_shorter_to_target_size_with_max_size(test_case): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=800, max_size=1333, keep_aspect_ratio=True, resize_side="shorter", # print_debug_info=True, ) def test_image_resize_longer_to_target_size_with_min_size(test_case): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=1000, min_size=600, keep_aspect_ratio=True, resize_side="longer", # print_debug_info=True, ) def test_image_resize_to_fixed_size_with_dtype_uint8(test_case): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=(1000, 1000), keep_aspect_ratio=False, dtype=flow.uint8, # print_debug_info=True, ) def test_image_resize_shorter_to_target_size_with_max_size_with_dtype_uint8( test_case, ): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=1000, max_size=1600, keep_aspect_ratio=True, resize_side="shorter", dtype=flow.uint8, # print_debug_info=True, ) def test_image_resize_uint8_to_float(test_case): image_files, _ = image_test_util.random_sample_images_from_coco() _test_image_resize_with_cv( test_case, image_files, target_size=(1000, 1000), keep_aspect_ratio=False, dtype=flow.float32, 
origin_dtype=flow.uint8, # print_debug_info=True, ) if __name__ == "__main__": unittest.main()
[ "oneflow.FunctionConfig", "oneflow.tensor_buffer_to_tensor_list", "oneflow.convert_oneflow_dtype_to_numpy_dtype", "oneflow.tensor_list_to_tensor_buffer", "oneflow.global_function", "oneflow.typing.ListListNumpy.Placeholder", "oneflow.clear_default_session", "oneflow.scope.mirrored_view", "oneflow.image.resize" ]
[((10347, 10433), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now because of single-client tensor_list removed"""'], {}), "(True,\n 'skip for now because of single-client tensor_list removed')\n", (10362, 10433), False, 'import unittest\n'), ((960, 1022), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'func_cfg'}), "(type='predict', function_config=func_cfg)\n", (980, 1022), True, 'import oneflow as flow\n'), ((1908, 1970), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'func_cfg'}), "(type='predict', function_config=func_cfg)\n", (1928, 1970), True, 'import oneflow as flow\n'), ((3767, 3830), 'image_test_util.infer_images_static_shape', 'image_test_util.infer_images_static_shape', (['image_list', 'channels'], {}), '(image_list, channels)\n', (3808, 3830), False, 'import image_test_util\n'), ((4007, 4035), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (4033, 4035), True, 'import oneflow as flow\n'), ((4051, 4072), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (4070, 4072), True, 'import oneflow as flow\n'), ((7591, 7651), 'image_test_util.read_images_by_cv', 'image_test_util.read_images_by_cv', (['image_files', 'origin_dtype'], {}), '(image_files, origin_dtype)\n', (7624, 7651), False, 'import image_test_util\n'), ((13641, 13656), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13654, 13656), False, 'import unittest\n'), ((1244, 1289), 'oneflow.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['image_list'], {}), '(image_list)\n', (1277, 1289), True, 'import oneflow as flow\n'), ((1320, 1482), 'oneflow.image.resize', 'flow.image.resize', (['image_buffer'], {'target_size': 'target_size', 'keep_aspect_ratio': '(False)', 'channels': 'channels', 'dtype': 'dtype', 'interpolation_type': 'interpolation_type'}), '(image_buffer, target_size=target_size, keep_aspect_ratio=\n False, channels=channels, dtype=dtype, interpolation_type=\n interpolation_type)\n', (1337, 1482), True, 'import oneflow as flow\n'), ((2214, 2259), 'oneflow.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['image_list'], {}), '(image_list)\n', (2247, 2259), True, 'import oneflow as flow\n'), ((2297, 2487), 'oneflow.image.resize', 'flow.image.resize', (['image_buffer'], {'target_size': 'target_size', 'min_size': 'min_size', 'max_size': 'max_size', 'keep_aspect_ratio': '(True)', 'resize_side': 'resize_side', 'interpolation_type': 'interpolation_type'}), '(image_buffer, target_size=target_size, min_size=min_size,\n max_size=max_size, keep_aspect_ratio=True, resize_side=resize_side,\n interpolation_type=interpolation_type)\n', (2314, 2487), True, 'import oneflow as flow\n'), ((2596, 2817), 'image_test_util.infer_keep_aspect_ratio_resized_images_static_shape', 'image_test_util.infer_keep_aspect_ratio_resized_images_static_shape', ([], {'target_size': 'target_size', 'min_size': 'min_size', 'max_size': 'max_size', 'aspect_ratio_list': 'aspect_ratio_list', 'resize_side': 'resize_side', 'channels': 'channels'}), '(target_size\n =target_size, min_size=min_size, max_size=max_size, aspect_ratio_list=\n aspect_ratio_list, resize_side=resize_side, channels=channels)\n', (2663, 2817), False, 'import image_test_util\n'), ((3011, 3085), 'oneflow.tensor_buffer_to_tensor_list', 'flow.tensor_buffer_to_tensor_list', (['res_image'], {'shape': 'out_shape', 'dtype': 'dtype'}), '(res_image, shape=out_shape, dtype=dtype)\n', 
(3044, 3085), True, 'import oneflow as flow\n'), ((4107, 4133), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (4131, 4133), True, 'import oneflow as flow\n'), ((4154, 4183), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (4168, 4183), True, 'import numpy as np\n'), ((5875, 5993), 'image_test_util.compute_keep_aspect_ratio_resized_size', 'image_test_util.compute_keep_aspect_ratio_resized_size', (['target_size', 'min_size', 'max_size', 'aspect_ratio', 'resize_side'], {}), '(target_size,\n min_size, max_size, aspect_ratio, resize_side)\n', (5929, 5993), False, 'import image_test_util\n'), ((10554, 10602), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (10600, 10602), False, 'import image_test_util\n'), ((10894, 10942), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (10940, 10942), False, 'import image_test_util\n'), ((11260, 11308), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (11306, 11308), False, 'import image_test_util\n'), ((11641, 11689), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (11687, 11689), False, 'import image_test_util\n'), ((12048, 12096), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (12094, 12096), False, 'import image_test_util\n'), ((12449, 12497), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (12495, 12497), False, 'import image_test_util\n'), ((12867, 12915), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (12913, 12915), False, 'import image_test_util\n'), ((13284, 13332), 'image_test_util.random_sample_images_from_coco', 'image_test_util.random_sample_images_from_coco', ([], {}), '()\n', (13330, 13332), False, 'import image_test_util\n'), ((1074, 1149), 'oneflow.typing.ListListNumpy.Placeholder', 'otp.ListListNumpy.Placeholder', ([], {'shape': 'image_static_shape', 'dtype': 'origin_dtype'}), '(shape=image_static_shape, dtype=origin_dtype)\n', (1103, 1149), True, 'import oneflow.typing as otp\n'), ((2031, 2099), 'oneflow.typing.ListListNumpy.Placeholder', 'otp.ListListNumpy.Placeholder', ([], {'shape': 'image_static_shape', 'dtype': 'dtype'}), '(shape=image_static_shape, dtype=dtype)\n', (2060, 2099), True, 'import oneflow.typing as otp\n'), ((8793, 8841), 'oneflow.convert_oneflow_dtype_to_numpy_dtype', 'flow.convert_oneflow_dtype_to_numpy_dtype', (['dtype'], {}), '(dtype)\n', (8834, 8841), True, 'import oneflow as flow\n'), ((10079, 10118), 'numpy.allclose', 'np.allclose', (['of_res_image', 'cv_res_image'], {}), '(of_res_image, cv_res_image)\n', (10090, 10118), True, 'import numpy as np\n'), ((10149, 10180), 'numpy.allclose', 'np.allclose', (['of_scale', 'cv_scale'], {}), '(of_scale, cv_scale)\n', (10160, 10180), True, 'import numpy as np\n'), ((10211, 10248), 'numpy.allclose', 'np.allclose', (['of_new_size', 'cv_new_size'], {}), '(of_new_size, cv_new_size)\n', (10222, 10248), True, 'import numpy as np\n')]
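For reference, the shorter-side/max-size geometry these resize tests exercise can be sketched in a few lines of plain Python. This approximates what `image_test_util.compute_keep_aspect_ratio_resized_size` computes; the real helper may differ in rounding details:

def keep_aspect_ratio_size(w, h, target_size, max_size=None):
    # scale so the shorter side reaches target_size ...
    scale = target_size / min(w, h)
    # ... unless that would push the longer side past max_size
    if max_size is not None and max(w, h) * scale > max_size:
        scale = max_size / max(w, h)
    return round(w * scale), round(h * scale)

print(keep_aspect_ratio_size(640, 480, target_size=800))                  # (1067, 800)
print(keep_aspect_ratio_size(1280, 480, target_size=800, max_size=1333))  # (1333, 500)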
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import collections from typing import Optional, Union import oneflow as flow import oneflow._oneflow_internal import oneflow.core.operator.op_conf_pb2 as op_conf_util import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util import oneflow.framework.id_util as id_util import oneflow.framework.remote_blob as remote_blob_util def reduce_mean( input_blob: oneflow._oneflow_internal.BlobDesc, axis: Optional[Union[collections.Sized, int]] = None, keepdims: bool = False, name: Optional[str] = None, ) -> oneflow._oneflow_internal.BlobDesc: """This operator computes the mean of input Blob along the specified axis Args: input_blob (oneflow._oneflow_internal.BlobDesc): A Blob axis (Optional[Union[collections.Sized, int]], optional): The dimension along which the mean value is computed. Defaults to None. keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False. name (Optional[str], optional): The name for the operation. Defaults to None. Returns: oneflow._oneflow_internal.BlobDesc: The result of average on the specified axis of input Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def reduce_mean_Job(x: tp.Numpy.Placeholder((3, 3)) ) -> tp.Numpy: return flow.math.reduce_mean(x, axis=1, keepdims=True) x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32) out = reduce_mean_Job(x) # out [[2.] # [5.] # [8.]] """ reduce_sum = flow.math.reduce_sum( input_blob, axis=axis, keepdims=keepdims, name=name ) if input_blob.is_dynamic: reduce_count = flow.math.reduced_shape_elem_cnt( input_blob, axis=axis, dtype=input_blob.dtype ) return reduce_sum / reduce_count else: if axis is None: axes = [] else: axes = list(axis) if isinstance(axis, collections.Sized) else [axis] reduce_count = 1 if len(axes) == 0: for dim in input_blob.shape: reduce_count *= dim else: for i in axes: reduce_count *= input_blob.shape[i] return flow.math.multiply(reduce_sum, 1.0 / reduce_count)
[ "oneflow.math.multiply", "oneflow.math.reduce_sum", "oneflow.math.reduced_shape_elem_cnt" ]
[((2290, 2363), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['input_blob'], {'axis': 'axis', 'keepdims': 'keepdims', 'name': 'name'}), '(input_blob, axis=axis, keepdims=keepdims, name=name)\n', (2310, 2363), True, 'import oneflow as flow\n'), ((2431, 2510), 'oneflow.math.reduced_shape_elem_cnt', 'flow.math.reduced_shape_elem_cnt', (['input_blob'], {'axis': 'axis', 'dtype': 'input_blob.dtype'}), '(input_blob, axis=axis, dtype=input_blob.dtype)\n', (2463, 2510), True, 'import oneflow as flow\n'), ((2963, 3013), 'oneflow.math.multiply', 'flow.math.multiply', (['reduce_sum', '(1.0 / reduce_count)'], {}), '(reduce_sum, 1.0 / reduce_count)\n', (2981, 3013), True, 'import oneflow as flow\n')]
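On the static-shape path above, the element count is a compile-time constant, so the op reduces to a sum divided by a product of dimension sizes. A quick numpy cross-check of the docstring example:

import numpy as np

x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
reduce_count = x.shape[1]                # 3 elements reduced per row (static shape)
out = x.sum(axis=1, keepdims=True) / reduce_count
print(out)                               # [[2.] [5.] [8.]], matching the docstring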
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import oneflow as flow import oneflow.python.framework.dtype as dtype_util import oneflow.python.framework.id_util as id_util from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.remote_blob import BlobDef from typing import Optional, Sequence @oneflow_export("tensor_buffer_to_tensor") def tensor_buffer_to_tensor( x: BlobDef, dtype: dtype_util.dtype, instance_shape: Sequence[int], name: Optional[str] = None, ) -> BlobDef: r"""Converts the Blob type to TensorBuffer. Args: x: Input `Blob`. dtype: The destination dtype. instance_shape: The shape of each TensorBuffer. name: Name for the operator. Returns: A `Blob`. """ if name is None: name = id_util.UniqueStr("TensorBufferToTensor_") return ( flow.user_op_builder(name) .Op("tensor_buffer_to_tensor") .Input("in", [x]) .Output("out") .Attr("dtype", dtype) .Attr("instance_shape", instance_shape) .Build() .InferAndTryRun() .RemoteBlobList()[0] ) @oneflow_export("tensor_to_tensor_buffer") def tensor_to_tensor_buffer( x: BlobDef, instance_dims: int, name: Optional[str] = None, ) -> BlobDef: r"""Converts the TensorBuffer Blob to dense Tensor. Args: x: Input `Blob`. instance_dims: The number of dimensions to convert to TensorBuffer. name: Name for the operator. Returns: A `Blob`. """ if name is None: name = id_util.UniqueStr("TensorToTensorBuffer_") return ( flow.user_op_builder(name) .Op("tensor_to_tensor_buffer") .Input("in", [x]) .Output("out") .Attr("instance_dims", instance_dims) .Build() .InferAndTryRun() .RemoteBlobList()[0] )
[ "oneflow.python.framework.id_util.UniqueStr", "oneflow.user_op_builder", "oneflow.python.oneflow_export.oneflow_export" ]
[((912, 953), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor_buffer_to_tensor"""'], {}), "('tensor_buffer_to_tensor')\n", (926, 953), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1737, 1778), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor_to_tensor_buffer"""'], {}), "('tensor_to_tensor_buffer')\n", (1751, 1778), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1399, 1441), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TensorBufferToTensor_"""'], {}), "('TensorBufferToTensor_')\n", (1416, 1441), True, 'import oneflow.python.framework.id_util as id_util\n'), ((2166, 2208), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TensorToTensorBuffer_"""'], {}), "('TensorToTensorBuffer_')\n", (2183, 2208), True, 'import oneflow.python.framework.id_util as id_util\n'), ((2230, 2256), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (2250, 2256), True, 'import oneflow as flow\n'), ((1463, 1489), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1483, 1489), True, 'import oneflow as flow\n')]
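A hypothetical round-trip sketch of the two exports above, written against the legacy lazy-graph API (untested; the shapes and the job name are invented):

import numpy as np
import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def round_trip_job(
    x: tp.Numpy.Placeholder((4, 16, 16), dtype=flow.float)
) -> tp.Numpy:
    # pack the trailing two dims of each instance into a TensorBuffer ...
    buf = flow.tensor_to_tensor_buffer(x, instance_dims=2)
    # ... then unpack it back to a dense tensor of the same shape
    return flow.tensor_buffer_to_tensor(buf, dtype=flow.float, instance_shape=(16, 16))

out = round_trip_job(np.ones((4, 16, 16), dtype=np.float32))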
import oneflow as flow


class scloss(object):
    """Combined loss: a channel-diversity term (loss_div) plus two softmax
    classification terms (loss_con on an auxiliary head, loss_pre on fc8)."""

    def __init__(self, cnum=3):
        # cnum: number of feature channels grouped per class
        self.cnum = cnum

    def ccmp(self, input, kernel_size, stride):
        # cross-channel max pooling: move channels to the last axis,
        # max-pool across them, then move them back
        input = flow.transpose(input, perm=[0, 3, 2, 1])
        input = flow.nn.max_pool2d(input, kernel_size, stride, padding="VALID")
        input = flow.transpose(input, perm=[0, 3, 2, 1])
        return input

    def loss_div(self, feature):
        # channel-diversity loss: softmax each channel over its spatial
        # positions, take the cross-channel max within each group of cnum
        # channels, and penalize groups whose channels overlap
        branch = feature
        branch = flow.reshape(branch, (branch.shape[0], branch.shape[1], branch.shape[2] * branch.shape[3]))
        branch = flow.nn.softmax(branch, 2)
        branch = flow.reshape(branch, (branch.shape[0], branch.shape[1], feature.shape[2], feature.shape[2]))
        branch = self.ccmp(branch, kernel_size=(1, self.cnum), stride=(1, self.cnum))
        branch = flow.reshape(branch, (branch.shape[0], branch.shape[1], branch.shape[2] * branch.shape[3]))
        loss_dis = 1.0 - 1.0 * flow.math.reduce_mean(flow.math.reduce_sum(branch, 2)) / self.cnum  # set margin = 3.0
        return loss_dis

    def loss_con(self, one_hot_labels, feature):
        # classification loss on an auxiliary fully connected head
        branch = feature
        fc_part1 = flow.layers.dense(
            flow.reshape(branch, (branch.shape[0], -1)),
            units=8,  # 8 vehicle color classes
            use_bias=True,
            kernel_initializer=flow.variance_scaling_initializer(2, 'fan_in', 'random_normal'),
            bias_initializer=flow.zeros_initializer(),
            name="fc1",
        )
        loss_con = flow.nn.softmax_cross_entropy_with_logits(one_hot_labels, fc_part1, name="softmax_loss1")
        return loss_con

    def loss_pre(self, one_hot_labels, fc8):
        # plain classification loss on the network's main fc8 logits
        return flow.nn.softmax_cross_entropy_with_logits(one_hot_labels, fc8)


def sc_loss(one_hot_labels, logits, fc8):
    loss = scloss(3)
    return loss.loss_con(one_hot_labels, logits) + loss.loss_div(logits) + loss.loss_pre(one_hot_labels, fc8)
[ "oneflow.nn.softmax", "oneflow.transpose", "oneflow.zeros_initializer", "oneflow.variance_scaling_initializer", "oneflow.reshape", "oneflow.nn.max_pool2d", "oneflow.math.reduce_sum", "oneflow.nn.softmax_cross_entropy_with_logits" ]
[((166, 206), 'oneflow.transpose', 'flow.transpose', (['input'], {'perm': '[0, 3, 2, 1]'}), '(input, perm=[0, 3, 2, 1])\n', (180, 206), True, 'import oneflow as flow\n'), ((223, 286), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['input', 'kernel_size', 'stride'], {'padding': '"""VALID"""'}), "(input, kernel_size, stride, padding='VALID')\n", (241, 286), True, 'import oneflow as flow\n'), ((303, 343), 'oneflow.transpose', 'flow.transpose', (['input'], {'perm': '[0, 3, 2, 1]'}), '(input, perm=[0, 3, 2, 1])\n', (317, 343), True, 'import oneflow as flow\n'), ((440, 535), 'oneflow.reshape', 'flow.reshape', (['branch', '(branch.shape[0], branch.shape[1], branch.shape[2] * branch.shape[3])'], {}), '(branch, (branch.shape[0], branch.shape[1], branch.shape[2] *\n branch.shape[3]))\n', (452, 535), True, 'import oneflow as flow\n'), ((549, 575), 'oneflow.nn.softmax', 'flow.nn.softmax', (['branch', '(2)'], {}), '(branch, 2)\n', (564, 575), True, 'import oneflow as flow\n'), ((593, 689), 'oneflow.reshape', 'flow.reshape', (['branch', '(branch.shape[0], branch.shape[1], feature.shape[2], feature.shape[2])'], {}), '(branch, (branch.shape[0], branch.shape[1], feature.shape[2],\n feature.shape[2]))\n', (605, 689), True, 'import oneflow as flow\n'), ((789, 884), 'oneflow.reshape', 'flow.reshape', (['branch', '(branch.shape[0], branch.shape[1], branch.shape[2] * branch.shape[3])'], {}), '(branch, (branch.shape[0], branch.shape[1], branch.shape[2] *\n branch.shape[3]))\n', (801, 884), True, 'import oneflow as flow\n'), ((1455, 1549), 'oneflow.nn.softmax_cross_entropy_with_logits', 'flow.nn.softmax_cross_entropy_with_logits', (['one_hot_labels', 'fc_part1'], {'name': '"""softmax_loss1"""'}), "(one_hot_labels, fc_part1, name=\n 'softmax_loss1')\n", (1496, 1549), True, 'import oneflow as flow\n'), ((1629, 1691), 'oneflow.nn.softmax_cross_entropy_with_logits', 'flow.nn.softmax_cross_entropy_with_logits', (['one_hot_labels', 'fc8'], {}), '(one_hot_labels, fc8)\n', (1670, 1691), True, 'import oneflow as flow\n'), ((1147, 1190), 'oneflow.reshape', 'flow.reshape', (['branch', '(branch.shape[0], -1)'], {}), '(branch, (branch.shape[0], -1))\n', (1159, 1190), True, 'import oneflow as flow\n'), ((1282, 1345), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', '"""random_normal"""'], {}), "(2, 'fan_in', 'random_normal')\n", (1315, 1345), True, 'import oneflow as flow\n'), ((1376, 1400), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1398, 1400), True, 'import oneflow as flow\n'), ((934, 965), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['branch', '(2)'], {}), '(branch, 2)\n', (954, 965), True, 'import oneflow as flow\n')]
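What `loss_div` computes is easier to see in plain numpy. An illustrative restatement for one invented feature map (assumes H == W, as the reshape in `loss_div` does). Identical channel maps within a group give a loss near 1 - 1/cnum, while perfectly diverse one-hot maps drive it toward 0:

import numpy as np

N, C, H, W, cnum = 1, 6, 4, 4, 3
feat = np.random.rand(N, C, H, W).astype(np.float32)

flat = feat.reshape(N, C, H * W)
soft = np.exp(flat) / np.exp(flat).sum(axis=2, keepdims=True)  # softmax over positions
# the ccmp step: max across each group of `cnum` consecutive channels
grouped = soft.reshape(N, C // cnum, cnum, H * W).max(axis=2)
loss_dis = 1.0 - grouped.sum(axis=2).mean() / cnum  # overlapping channels raise the loss
print(loss_dis)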
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import os from typing import Optional import oneflow as flow import oneflow.python.framework.id_util as id_util import oneflow.python.framework.remote_blob as remote_blob_util from oneflow.python.oneflow_export import oneflow_export def build_math_binary_elementwise_op(math_op, x, y, name=None): if name is None: name = id_util.UniqueStr(math_op + "_") return ( flow.user_op_builder(name) .Op(math_op) .Input("x", [x]) .Input("y", [y]) .Output("z") .Build() .InferAndTryRun() .RemoteBlobList()[0] ) @oneflow_export("math.atan2") def atan2( x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None ) -> remote_blob_util.BlobDef: r"""This operator computes the values of :math:`arctan(\frac{x}{y})`. The equation is: .. math:: out = arctan(\frac{x}{y}) Args: x (remote_blob_util.BlobDef): A Blob y (remote_blob_util.BlobDef): A Blob name (Optional[str], optional): The name for the operation. Defaults to None. Returns: remote_blob_util.BlobDef: The result Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def atan2Job(x: tp.Numpy.Placeholder((3,),), y: tp.Numpy.Placeholder((3, )) )-> tp.Numpy: return flow.math.atan2(x, y) x = np.array([1, 2, 3]).astype(np.float32) y = np.array([4, 4, 4]).astype(np.float32) out = atan2Job(x, y) # out [0.24497867 0.4636476 0.6435011 ] # We take the first value as an example # (arctan(1/4) * pi) / 180 = 0.24497867 """ return build_math_binary_elementwise_op("atan2", x, y, name) @oneflow_export("math.pow") def pow( x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None ) -> remote_blob_util.BlobDef: """This operator computes the Pow result. The equation is: .. math:: out = x^y Args: x (remote_blob_util.BlobDef): A Blob y (remote_blob_util.BlobDef): A Blob, the exponential factor of Pow name (Optional[str], optional): The name for the operation. Defaults to None. Returns: remote_blob_util.BlobDef: The result Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def powJob(x: tp.Numpy.Placeholder((3,), ), y: tp.Numpy.Placeholder((3,)) ) -> tp.Numpy: return flow.math.pow(x, y) x = np.array([2, 3, 4]).astype(np.float32) y = np.array([2, 3, 4]).astype(np.float32) out = powJob(x, y) # out [ 4. 27. 256.] """ return build_math_binary_elementwise_op("pow", x, y, name) @oneflow_export("math.floordiv") def floordiv( x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None ) -> remote_blob_util.BlobDef: """This operator computes the result of :math:`x/y`, rounding toward the most negative integer value Args: x (remote_blob_util.BlobDef): A Blob y (remote_blob_util.BlobDef): A Blob name (Optional[str], optional): The name for the operation. Defaults to None. Returns: remote_blob_util.BlobDef: The result Blob For example: .. 
code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def floor_div_Job(x: tp.Numpy.Placeholder((3,)), y: tp.Numpy.Placeholder((3,)) ) -> tp.Numpy: return flow.math.floordiv(x, y) x = np.array([4, 3, 5]).astype(np.float32) y = np.array([3, 2, 2]).astype(np.float32) out = floor_div_Job(x, y) # out [1. 1. 2.] """ return build_math_binary_elementwise_op("floordiv", x, y, name) @oneflow_export("math.xdivy") def xdivy( x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None ) -> remote_blob_util.BlobDef: """This operator computes the result of :math:`x/y` Args: x (remote_blob_util.BlobDef): A Blob y (remote_blob_util.BlobDef): A Blob name (Optional[str], optional): The name for the operation. Defaults to None. Returns: remote_blob_util.BlobDef: The result Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def xdivy_Job(x: tp.Numpy.Placeholder((3,)), y: tp.Numpy.Placeholder((3,)) ) -> tp.Numpy: return flow.math.xdivy(x, y) x = np.array([4, 3, 5]).astype(np.float32) y = np.array([3, 2, 2]).astype(np.float32) out = xdivy_Job(x, y) # out [1.3333334 1.5 2.5 ] """ return build_math_binary_elementwise_op("xdivy", x, y, name) @oneflow_export("math.xlogy") def xlogy( x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None ) -> remote_blob_util.BlobDef: """This operator computes the result of :math:`x*log(y)` Args: x (remote_blob_util.BlobDef): A Blob y (remote_blob_util.BlobDef): A Blob name (Optional[str], optional): The name for the operation. Defaults to None. Returns: remote_blob_util.BlobDef: The result Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def xlogy_Job(x: tp.Numpy.Placeholder((3,)), y: tp.Numpy.Placeholder((3,)) ) -> tp.Numpy: return flow.math.xlogy(x, y) x = np.array([2, 2, 2]).astype(np.float32) y = np.array([4, 8, 16]).astype(np.float32) out = xlogy_Job(x, y) # out [2.7725887 4.158883 5.5451775] """ return build_math_binary_elementwise_op("xlogy", x, y, name)
[ "oneflow.python.framework.id_util.UniqueStr", "oneflow.user_op_builder", "oneflow.python.oneflow_export.oneflow_export" ]
[((1220, 1248), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.atan2"""'], {}), "('math.atan2')\n", (1234, 1248), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2459, 2485), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.pow"""'], {}), "('math.pow')\n", (2473, 2485), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3571, 3602), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.floordiv"""'], {}), "('math.floordiv')\n", (3585, 3602), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4690, 4718), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.xdivy"""'], {}), "('math.xdivy')\n", (4704, 4718), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5767, 5795), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.xlogy"""'], {}), "('math.xlogy')\n", (5781, 5795), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((966, 998), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (["(math_op + '_')"], {}), "(math_op + '_')\n", (983, 998), True, 'import oneflow.python.framework.id_util as id_util\n'), ((1020, 1046), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1040, 1046), True, 'import oneflow as flow\n')]
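A quick numpy cross-check (illustrative only) of the `xlogy` docstring values above:

import numpy as np

x = np.array([2, 2, 2], dtype=np.float32)
y = np.array([4, 8, 16], dtype=np.float32)
print(x * np.log(y))  # [2.7725887 4.158883  5.5451775], matching the docstring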
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from abc import ABCMeta, abstractmethod from typing import Any, Dict import oneflow as flow from libai.config import LazyConfig, try_get_key from libai.engine import DefaultTrainer from libai.utils import distributed as dist from libai.utils.checkpoint import Checkpointer from libai.utils.logger import setup_logger logger = setup_logger(distributed_rank=dist.get_rank()) logger = logging.getLogger("libai.inference") class BasePipeline(metaclass=ABCMeta): """ Base class for all task pipeline """ def __init__( self, config_file, data_parallel=None, tensor_parallel=None, pipeline_parallel=None, **kwargs, ): # init cfg self.cfg = LazyConfig.load(config_file) flow.boxing.nccl.set_fusion_threshold_mbytes( try_get_key(self.cfg, "train.nccl_fusion_threshold_mb", default=16) ) flow.boxing.nccl.set_fusion_max_ops_num( try_get_key(self.cfg, "train.nccl_fusion_max_ops", default=24) ) self.update_cfg(data_parallel, tensor_parallel, pipeline_parallel) dist.setup_dist_util(self.cfg.train.dist) assert ( self.cfg.train.dist.data_parallel_size == 1 ), "not support data parallel yet, only support tensor and pipeline parallel" logger.info(self.cfg.train.dist) # initial and load model self.model = DefaultTrainer.build_model(self.cfg).eval() self.load_pretrain_weight(self.model, self.cfg) # initial tokenizer self.tokenizer = self.build_tokenizer(self.cfg) # set parameters ( self._preprocess_params, self._forward_params, self._postprocess_params, ) = self._parse_parameters(**kwargs) def update_cfg( self, data_parallel=1, tensor_parallel=1, pipeline_parallel=1, ): self.cfg.train.dist.data_parallel_size = data_parallel self.cfg.train.dist.tensor_parallel_size = tensor_parallel self.cfg.train.dist.pipeline_parallel_size = pipeline_parallel if self.cfg.train.dist.pipeline_parallel_size > 1: assert ( try_get_key(self.cfg.train.dist, "pipeline_num_layers") is not None ), "cfg.train.dist.pipeline_num_layers must be set when run pipeline parallel" def load_pretrain_weight(self, model, cfg): Checkpointer(model, save_dir=cfg.train.output_dir).resume_or_load( cfg.train.load_weight, resume=False ) def build_tokenizer(self, cfg): tokenizer = None if try_get_key(cfg, "tokenization") is not None: tokenizer = DefaultTrainer.build_tokenizer(cfg) return tokenizer @abstractmethod def _parse_parameters(self, **pipeline_parameters): raise NotImplementedError("_parse_parameters not implemented") def __call__(self, inputs, *args, batch_size=None, **kwargs) -> dict: preprocess_params, forward_params, postprocess_params = self._parse_parameters( **kwargs ) # noqa # Fuse __init__ params and __call__ params without modifying the __init__ ones. 
preprocess_params = {**self._preprocess_params, **preprocess_params} forward_params = {**self._forward_params, **forward_params} postprocess_params = {**self._postprocess_params, **postprocess_params} with flow.no_grad(): model_inputs_dict = self.preprocess(inputs, **preprocess_params) model_outputs_dict = self.forward(model_inputs_dict, **forward_params) model_outputs_dict = self.to_local(model_outputs_dict) if dist.is_main_process(): outputs_dict = self.postprocess(model_outputs_dict, **postprocess_params) else: outputs_dict = {} dist.synchronize() return outputs_dict def to_local(self, model_outputs_dict): for key, value in model_outputs_dict.items(): if isinstance(value, flow.Tensor) and value.is_global: model_outputs_dict[key] = dist.ttol( value, ranks=[0] if value.placement.ranks.ndim == 1 else [[0]] ) if flow.cuda.is_available(): dist.synchronize() return model_outputs_dict @abstractmethod def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> dict: raise NotImplementedError("preprocess not implemented") @abstractmethod def forward(self, **kwargs: Dict) -> dict: raise NotImplementedError("forward not implemented") @abstractmethod def postprocess(self, **kwargs: Dict) -> dict: raise NotImplementedError("postprocess not implemented")
[ "oneflow.no_grad", "oneflow.cuda.is_available" ]
[((1021, 1057), 'logging.getLogger', 'logging.getLogger', (['"""libai.inference"""'], {}), "('libai.inference')\n", (1038, 1057), False, 'import logging\n'), ((995, 1010), 'libai.utils.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1008, 1010), True, 'from libai.utils import distributed as dist\n'), ((1359, 1387), 'libai.config.LazyConfig.load', 'LazyConfig.load', (['config_file'], {}), '(config_file)\n', (1374, 1387), False, 'from libai.config import LazyConfig, try_get_key\n'), ((1749, 1790), 'libai.utils.distributed.setup_dist_util', 'dist.setup_dist_util', (['self.cfg.train.dist'], {}), '(self.cfg.train.dist)\n', (1769, 1790), True, 'from libai.utils import distributed as dist\n'), ((4879, 4903), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (4901, 4903), True, 'import oneflow as flow\n'), ((1454, 1521), 'libai.config.try_get_key', 'try_get_key', (['self.cfg', '"""train.nccl_fusion_threshold_mb"""'], {'default': '(16)'}), "(self.cfg, 'train.nccl_fusion_threshold_mb', default=16)\n", (1465, 1521), False, 'from libai.config import LazyConfig, try_get_key\n'), ((1593, 1655), 'libai.config.try_get_key', 'try_get_key', (['self.cfg', '"""train.nccl_fusion_max_ops"""'], {'default': '(24)'}), "(self.cfg, 'train.nccl_fusion_max_ops', default=24)\n", (1604, 1655), False, 'from libai.config import LazyConfig, try_get_key\n'), ((3255, 3287), 'libai.config.try_get_key', 'try_get_key', (['cfg', '"""tokenization"""'], {}), "(cfg, 'tokenization')\n", (3266, 3287), False, 'from libai.config import LazyConfig, try_get_key\n'), ((3325, 3360), 'libai.engine.DefaultTrainer.build_tokenizer', 'DefaultTrainer.build_tokenizer', (['cfg'], {}), '(cfg)\n', (3355, 3360), False, 'from libai.engine import DefaultTrainer\n'), ((4065, 4079), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (4077, 4079), True, 'import oneflow as flow\n'), ((4323, 4345), 'libai.utils.distributed.is_main_process', 'dist.is_main_process', ([], {}), '()\n', (4343, 4345), True, 'from libai.utils import distributed as dist\n'), ((4501, 4519), 'libai.utils.distributed.synchronize', 'dist.synchronize', ([], {}), '()\n', (4517, 4519), True, 'from libai.utils import distributed as dist\n'), ((4917, 4935), 'libai.utils.distributed.synchronize', 'dist.synchronize', ([], {}), '()\n', (4933, 4935), True, 'from libai.utils import distributed as dist\n'), ((2046, 2082), 'libai.engine.DefaultTrainer.build_model', 'DefaultTrainer.build_model', (['self.cfg'], {}), '(self.cfg)\n', (2072, 2082), False, 'from libai.engine import DefaultTrainer\n'), ((2841, 2896), 'libai.config.try_get_key', 'try_get_key', (['self.cfg.train.dist', '"""pipeline_num_layers"""'], {}), "(self.cfg.train.dist, 'pipeline_num_layers')\n", (2852, 2896), False, 'from libai.config import LazyConfig, try_get_key\n'), ((3057, 3107), 'libai.utils.checkpoint.Checkpointer', 'Checkpointer', (['model'], {'save_dir': 'cfg.train.output_dir'}), '(model, save_dir=cfg.train.output_dir)\n', (3069, 3107), False, 'from libai.utils.checkpoint import Checkpointer\n'), ((4756, 4829), 'libai.utils.distributed.ttol', 'dist.ttol', (['value'], {'ranks': '([0] if value.placement.ranks.ndim == 1 else [[0]])'}), '(value, ranks=[0] if value.placement.ranks.ndim == 1 else [[0]])\n', (4765, 4829), True, 'from libai.utils import distributed as dist\n')]
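A hypothetical minimal subclass showing which hooks a concrete pipeline must implement. The class name and bodies are invented; real libai pipelines tokenize in `preprocess` and run `self.model` in `forward`, and instantiating any subclass still requires a valid LazyConfig file:

class EchoPipeline(BasePipeline):
    def _parse_parameters(self, **pipeline_parameters):
        # route kwargs to the three stages; nothing to route in this sketch
        return {}, {}, {}

    def preprocess(self, inputs, **kwargs) -> dict:
        return {"text": inputs}       # tokenization would happen here

    def forward(self, model_inputs_dict, **kwargs) -> dict:
        return model_inputs_dict      # a real pipeline calls self.model here

    def postprocess(self, model_outputs_dict, **kwargs) -> dict:
        return model_outputs_dict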
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import OrderedDict import numpy as np import oneflow as flow import oneflow.typing as oft from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type import unittest import os def TestDataTypeAttr(input, output_type): assert output_type in flow.dtypes() return ( flow.user_op_builder("TestDataTypeAttr") .Op("TestDataTypeAttr") .Input("in", [input]) .Output("out") .Attr("output_type", output_type) .Build() .InferAndTryRun() .RemoteBlobList()[0] ) def RunTest(data_type): flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) @flow.global_function(function_config=func_config) def TestDataTypeAttrJob(input: oft.Numpy.Placeholder((10, 10), dtype=flow.float)): return TestDataTypeAttr(input, type_name_to_flow_type[data_type]) input = np.random.random_sample((10, 10)).astype(np.float32) output = TestDataTypeAttrJob(input).get().numpy() assert output.dtype == type_name_to_np_type[data_type] @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_data_type_attr(test_case): # TODO: fix bugs in ForeignOutputKernel with "float16" and "char" dtype, do not test these two dtypes here for data_type in ["float32", "double", "int8", "int32", "int64", "uint8"]: RunTest(data_type)
[ "oneflow.FunctionConfig", "oneflow.dtypes", "oneflow.global_function", "oneflow.user_op_builder", "oneflow.typing.Numpy.Placeholder", "oneflow.clear_default_session" ]
[((1184, 1212), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1210, 1212), True, 'import oneflow as flow\n'), ((1231, 1252), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1250, 1252), True, 'import oneflow as flow\n'), ((1305, 1354), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1325, 1354), True, 'import oneflow as flow\n'), ((1714, 1748), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1723, 1748), False, 'import os\n'), ((873, 886), 'oneflow.dtypes', 'flow.dtypes', ([], {}), '()\n', (884, 886), True, 'import oneflow as flow\n'), ((1390, 1439), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10, 10)'], {'dtype': 'flow.float'}), '((10, 10), dtype=flow.float)\n', (1411, 1439), True, 'import oneflow.typing as oft\n'), ((1529, 1562), 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 10)'], {}), '((10, 10))\n', (1552, 1562), True, 'import numpy as np\n'), ((908, 948), 'oneflow.user_op_builder', 'flow.user_op_builder', (['"""TestDataTypeAttr"""'], {}), "('TestDataTypeAttr')\n", (928, 948), True, 'import oneflow as flow\n')]
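The `type_name_to_flow_type` / `type_name_to_np_type` tables imported from test_util map dtype names to framework and numpy dtypes. A miniature stand-in (assumption: the real tables cover more types) that makes the assertion in `RunTest` concrete:

import numpy as np
import oneflow as flow

# hypothetical two-entry versions of the test_util lookup tables
type_name_to_flow_type = {"float32": flow.float32, "int8": flow.int8}
type_name_to_np_type = {"float32": np.float32, "int8": np.int8}

out = np.zeros((10, 10), dtype=type_name_to_np_type["int8"])
assert out.dtype == type_name_to_np_type["int8"]  # mirrors the dtype check in RunTest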
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module from oneflow.python.oneflow_export import oneflow_export, experimental_api from oneflow.python.framework.tensor import register_tensor_op def check_dim(num_dims, input_dim): if input_dim == None: dim = input_dim elif isinstance(input_dim, (int, tuple)): if isinstance(input_dim, int): dim = input_dim if input_dim >= 0 else input_dim + num_dims if dim >= num_dims or dim < 0: raise IndexError("Dimension out of range") else: temp = list(input_dim) for i in range(len(temp)): temp[i] = temp[i] if temp[i] >= 0 else temp[i] + num_dims if temp[i] >= num_dims or temp[i] < 0: raise IndexError("Dimension out of range") dim = temp else: raise TypeError( "linalg_vector_norm(): argument 'dim' must be tuple of ints, not {}".format( type(input_dim) ) ) return dim def _norm_min_max(input, ord, dim, keepdim): if ord > 0: return flow.experimental.max(input, dim=dim, keepdim=keepdim) else: return flow.experimental.min(input, dim=dim, keepdim=keepdim) class Vector_Norm(Module): def __init__(self, ord=2, dim=None, keepdim=False) -> None: super().__init__() if ord == None: self.ord = 2.0 elif isinstance(ord, (int, float)): self.ord = float(ord) else: raise TypeError( "linalg_vector_norm(): argument 'ord' must be Number, not {}".format( type(ord) ) ) self.dim = dim self.keepdim = keepdim def _vector_norm(self, x, ord, dim, keepdim=False): if ord == 0: # TODO: fix error when input are all zero vector return flow.experimental.cast( flow.tensor([flow.experimental.argwhere(x).shape[0]]), flow.float32 ) elif ord == float("inf"): return flow.experimental.max( flow.experimental.abs(x), dim=dim, keepdim=keepdim ) elif ord == float("-inf"): return flow.experimental.min( flow.experimental.abs(x), dim=dim, keepdim=keepdim ) else: return flow.experimental.pow( flow.experimental.sum( flow.experimental.pow(flow.experimental.abs(x), ord), dim=dim, keepdim=keepdim, ), 1.0 / ord, ) def forward(self, x): num_dims = len(x.shape) dim = check_dim(num_dims, self.dim) if dim == None: return self._vector_norm( x.flatten(), ord=self.ord, dim=self.dim, keepdim=self.keepdim ) else: return self._vector_norm(x, ord=self.ord, dim=dim, keepdim=self.keepdim) class Matrix_Norm(Module): def __init__(self, ord="fro", dim=(-2, -1), keepdim=False) -> None: super().__init__() if isinstance(ord, str): assert ord in ["fro", "nuc"], "{} are not supported in matrix norm".format( ord ) self.ord = ord elif isinstance(ord, float): assert ord in [ float("inf"), float("-inf"), ], "{} are not supported in matrix norm".format(ord) self.ord = ord elif isinstance(ord, int): assert ord in [1, -1, 2, -2], "{} are not supported in matrix norm".format( ord ) self.ord = ord elif ord == None: self.ord = "fro" else: raise TypeError( "linalg_matrix_norm(): argument 'ord' must be Number, not {}".format( type(ord) ) ) if isinstance(dim, tuple) and len(dim) == 2 and dim[0] != dim[1]: self.dim = dim else: raise TypeError( 
"linalg.matrix_norm(): dim must be a 2-tuple of ints with different elements" ) self.keepdim = keepdim def _matrix_norm(self, x, ord, dim, keepdim): if ord == "nuc": raise NotImplementedError elif ord == "fro": return flow.experimental.sqrt( flow.experimental.sum( flow.experimental.square(x), dim=dim, keepdim=keepdim ) ) elif ord in [float("inf"), float("-inf")]: dim_0, dim_1 = dim[0], dim[1] dim_0, dim_1 = dim_1, dim_0 if dim_1 > dim_0 and not keepdim: dim_1 -= 1 res = flow.experimental.sum( flow.experimental.abs(x), dim=dim_0, keepdim=keepdim ) return _norm_min_max(res, ord, dim_1, keepdim) elif ord in [1, -1]: dim_0, dim_1 = dim[0], dim[1] if dim_1 > dim_0 and not keepdim: dim_1 -= 1 res = flow.experimental.sum( flow.experimental.abs(x), dim=dim_0, keepdim=keepdim ) return _norm_min_max(res, ord, dim_1, keepdim) elif ord in [2, -2]: raise NotImplementedError else: raise ValueError("Invalid norm order: {}".format(ord)) def forward(self, x): num_dims = len(x.shape) if num_dims < 2: raise RuntimeError( "linalg.matrix_norm(): input tensor must be a matrix or batch of matrices" ) dim = check_dim(num_dims, self.dim) return self._matrix_norm(x, ord=self.ord, dim=dim, keepdim=self.keepdim) class Norm(Module): def __init__(self, ord=None, dim=None, keepdim=False) -> None: super().__init__() self.ord = ord self.dim = dim self.keepdim = keepdim def forward(self, x): if isinstance(self.dim, int): res = Vector_Norm(ord=self.ord, dim=self.dim, keepdim=self.keepdim)(x) elif isinstance(self.dim, tuple): res = Matrix_Norm(ord=self.ord, dim=self.dim, keepdim=self.keepdim)(x) elif self.dim == None and self.ord != None: assert ( len(x.shape) <= 2 ), "input must be 1-D or 2-D when dim is None and ord is not None" if len(x.shape) == 1: res = Vector_Norm(ord=self.ord, keepdim=self.keepdim)(x) else: res = Matrix_Norm(ord=self.ord, keepdim=self.keepdim)(x) elif self.dim == None and self.ord == None: res = Vector_Norm(keepdim=self.keepdim)(x) return res @oneflow_export("linalg.norm") @experimental_api def norm_op(input, ord=None, dim=None, keepdim=False): r"""linalg.norm(input, ord=None, dim=None, keepdim=False, *, out=None) -> Tensor Returns the matrix norm or vector norm of a given tensor. This function can calculate one of eight different types of matrix norms, or one of an infinite number of vector norms, depending on both the number of reduction dimensions and the value of the `ord` parameter. Args: input (Tensor): The input tensor. If dim is None, input must be 1-D or 2-D, unless :attr:`ord` is None. If both :attr:`dim` and :attr:`ord` are None, the 2-norm of the input flattened to 1-D will be returned. Its data type must be either a floating point or complex type. For complex inputs, the norm is calculated on of the absolute values of each element. If the input is complex and neither :attr:`dtype` nor :attr:`out` is specified, the result's data type will be the corresponding floating point type (e.g. float if :attr:`input` is complexfloat). ord (int, float, inf, -inf, 'fro', 'nuc', optional): The order of norm. inf refers to :attr:`float('inf')`, numpy's :attr:`inf` object, or any equivalent object. 
The following norms can be calculated: ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm -- not supported -- 'nuc' -- not supported yet -- -- not supported -- inf max(sum(abs(x), dim=1)) max(abs(x)) -inf min(sum(abs(x), dim=1)) min(abs(x)) 0 -- not supported -- sum(x != 0) 1 max(sum(abs(x), dim=0)) as below -1 min(sum(abs(x), dim=0)) as below 2 -- not supported yet -- as below -2 -- not supported yet -- as below other -- not supported -- sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== Default: ``None`` dim (int, 2-tuple of ints, 2-list of ints, optional): If :attr:`dim` is an int, vector norm will be calculated over the specified dimension. If :attr:`dim` is a 2-tuple of ints, matrix norm will be calculated over the specified dimensions. If :attr:`dim` is None, matrix norm will be calculated when the input tensor has two dimensions, and vector norm will be calculated when the input tensor has one dimension. Default: ``None`` keepdim (bool, optional): If set to True, the reduced dimensions are retained in the result as dimensions with size one. Default: ``False`` out (Tensor, optional): The output tensor. Examples:: >>> import oneflow.experimental as flow >>> from oneflow.experimental import linalg as LA >>> import numpy as np >>> flow.enable_eager_execution() >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4) >>> a tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32) >>> b = a.reshape((3, 3)) >>> b tensor([[-4., -3., -2.], [-1., 0., 1.], [ 2., 3., 4.]], dtype=oneflow.float32) >>> LA.norm(a) tensor([7.746], dtype=oneflow.float32) >>> LA.norm(b) tensor([7.746], dtype=oneflow.float32) >>> LA.norm(b, 'fro') tensor([7.746], dtype=oneflow.float32) >>> LA.norm(a, float('inf')) tensor([4.], dtype=oneflow.float32) >>> LA.norm(b, float('inf')) tensor([9.], dtype=oneflow.float32) >>> LA.norm(a, -float('inf')) tensor([0.], dtype=oneflow.float32) >>> LA.norm(b, -float('inf')) tensor([2.], dtype=oneflow.float32) >>> LA.norm(a, 1) tensor([20.], dtype=oneflow.float32) >>> LA.norm(b, 1) tensor([7.], dtype=oneflow.float32) >>> LA.norm(a, -1) tensor([0.], dtype=oneflow.float32) >>> LA.norm(b, -1) tensor([6.], dtype=oneflow.float32) >>> LA.norm(a, 2) tensor([7.746], dtype=oneflow.float32) >>> LA.norm(a, -2) tensor([0.], dtype=oneflow.float32) >>> LA.norm(a, 3) tensor([5.848], dtype=oneflow.float32) >>> LA.norm(a, -3) tensor([0.], dtype=oneflow.float32) Using the :attr:`dim` argument to compute vector norms:: >>> c = flow.tensor([[1., 2., 3.], ... [-1, 1, 4]]) >>> LA.norm(c, dim=0) tensor([1.4142, 2.2361, 5. 
], dtype=oneflow.float32)
        >>> LA.norm(c, dim=1, keepdim=True)
        tensor([[3.7417],
                [4.2426]], dtype=oneflow.float32)
        >>> LA.norm(c, ord=1, dim=1)
        tensor([6., 6.], dtype=oneflow.float32)

    Using the :attr:`dim` argument to compute matrix norms::

        >>> m = flow.tensor(np.arange(8, dtype=np.float32)).reshape((2, 2, 2))
        >>> LA.norm(m, dim=(1,2))
        tensor([ 3.7417, 11.225 ], dtype=oneflow.float32)

    """
    return Norm(ord, dim, keepdim)(input)


@register_tensor_op("norm")
@experimental_api
def norm_tensor_op(input, ord=None, dim=None, keepdim=False):
    r"""
    See :func:`oneflow.experimental.linalg.norm`
    """
    return Norm(ord, dim, keepdim)(input)


@oneflow_export("linalg.vector_norm")
@experimental_api
def vector_norm_tensor_op(input, ord=2, dim=None, keepdim=False):
    r"""
    linalg.vector_norm(input, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor

    Computes a vector norm.

    Supports input of float and double dtypes.

    This function does not necessarily treat multidimensional :attr:`input` as a batch of
    vectors, instead:

    - If :attr:`dim`\ `= None`, :attr:`input` will be flattened before the norm is computed.
    - If :attr:`dim` is an `int` or a `tuple`, the norm will be computed over these dimensions
      and the other dimensions will be treated as batch dimensions.

    This behavior is for consistency with :func:`flow.linalg.norm`.

    :attr:`ord` defines the vector norm that is computed. The following norms are supported:

    ====================== ========================================================
    :attr:`ord`            vector norm
    ====================== ========================================================
    `2` (default)          `2`-norm (see below)
    `inf`                  `max(abs(x))`
    `-inf`                 `min(abs(x))`
    `0`                    `sum(x != 0)`
    other `int` or `float` `sum(abs(x)^{ord})^{(1 / ord)}`
    ====================== ========================================================

    where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.

    Args:
        input (Tensor): tensor, flattened by default, but this behavior can be
            controlled using :attr:`dim`.
        ord (int, float, inf, -inf, optional): order of norm. Default: `2`
        dim (int, Tuple[int], optional): dimensions over which to compute
            the norm. See above for the behavior when :attr:`dim`\ `= None`.
            Default: `None`
        keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
            in the result as dimensions with size one. Default: `False`

    Returns:
        A real-valued tensor.

    Examples::

        >>> import oneflow.experimental as flow
        >>> from oneflow.experimental import linalg as LA
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)
        >>> a
        tensor([-4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.], dtype=oneflow.float32)
        >>> b = a.reshape((3, 3))
        >>> b
        tensor([[-4., -3., -2.],
                [-1.,  0.,  1.],
                [ 2.,  3.,  4.]], dtype=oneflow.float32)
        >>> LA.vector_norm(a, ord=3.5)
        tensor([5.4345], dtype=oneflow.float32)
        >>> LA.vector_norm(b, ord=3.5)
        tensor([5.4345], dtype=oneflow.float32)

    """
    return Vector_Norm(ord, dim, keepdim)(input)


@oneflow_export("linalg.matrix_norm")
@experimental_api
def matrix_norm_tensor_op(input, ord="fro", dim=(-2, -1), keepdim=False):
    r"""
    linalg.matrix_norm(input, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor

    Computes a matrix norm.

    Supports input of float and double dtypes.
    Also supports batches of matrices: the norm will be computed over the
    dimensions specified by the 2-tuple :attr:`dim` and the other dimensions will
    be treated as batch dimensions.
The output will have the same batch dimensions. :attr:`ord` defines the matrix norm that is computed. The following norms are supported: ====================== ======================================================== :attr:`ord` matrix norm ====================== ======================================================== `'fro'` (default) Frobenius norm `'nuc'` -- not supported yet -- `inf` `max(sum(abs(x), dim=1))` `-inf` `min(sum(abs(x), dim=1))` `1` `max(sum(abs(x), dim=0))` `-1` `min(sum(abs(x), dim=0))` `2` -- not supported yet -- `-2` -- not supported yet -- ====================== ======================================================== where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object. Args: input (Tensor): tensor with two or more dimensions. By default its shape is interpreted as `(*, m, n)` where `*` is zero or more batch dimensions, but this behavior can be controlled using :attr:`dim`. ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'fro'` dim (Tuple[int, int], optional): dimensions over which to compute the norm. Default: `(-2, -1)` keepdim (bool, optional): If set to `True`, the reduced dimensions are retained in the result as dimensions with size one. Default: `False` Returns: A real-valued tensor. Examples:: >>> import oneflow.experimental as flow >>> from oneflow.experimental import linalg as LA >>> import numpy as np >>> flow.enable_eager_execution() >>> a = flow.tensor(np.arange(9, dtype=np.float32)).reshape((3,3)) >>> a tensor([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], dtype=oneflow.float32) >>> LA.matrix_norm(a) tensor([14.2829], dtype=oneflow.float32) >>> LA.matrix_norm(a, ord=-1) tensor([9.], dtype=oneflow.float32) >>> b = a.expand(2, -1, -1) >>> b tensor([[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], <BLANKLINE> [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]], dtype=oneflow.float32) >>> LA.matrix_norm(b) tensor([14.2829, 14.2829], dtype=oneflow.float32) >>> LA.matrix_norm(b, dim=(0, 2)) tensor([ 3.1623, 10. , 17.2627], dtype=oneflow.float32) """ return Matrix_Norm(ord, dim, keepdim)(input) if __name__ == "__main__": import doctest doctest.testmod(raise_on_error=True)
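

# Editorial sketch (not part of the original module; assumes only NumPy): a quick
# numeric cross-check of the documented vector-norm formula
# sum(abs(x)**ord)**(1./ord) and of the Frobenius norm, using the same values as
# the doctests above.
def _numpy_norm_crosscheck():
    import numpy as np

    a = np.arange(9, dtype=np.float32) - 4
    # other int/float ord: sum(abs(x)**ord)**(1./ord); matches LA.vector_norm(a, ord=3.5)
    assert np.isclose((np.abs(a) ** 3.5).sum() ** (1.0 / 3.5), 5.4345, atol=1e-3)
    # Frobenius norm: sqrt(sum(x**2)); matches LA.norm(a.reshape(3, 3))
    assert np.isclose(np.sqrt((a ** 2).sum()), 7.746, atol=1e-3)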
[ "oneflow.experimental.abs", "oneflow.experimental.argwhere", "oneflow.experimental.square", "oneflow.experimental.min", "oneflow.python.framework.tensor.register_tensor_op", "oneflow.python.oneflow_export.oneflow_export", "oneflow.experimental.max" ]
[((7357, 7386), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""linalg.norm"""'], {}), "('linalg.norm')\n", (7371, 7386), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((12806, 12832), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""norm"""'], {}), "('norm')\n", (12824, 12832), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((13024, 13060), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""linalg.vector_norm"""'], {}), "('linalg.vector_norm')\n", (13038, 13060), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((15850, 15886), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""linalg.matrix_norm"""'], {}), "('linalg.matrix_norm')\n", (15864, 15886), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((19123, 19159), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (19138, 19159), False, 'import doctest\n'), ((1720, 1774), 'oneflow.experimental.max', 'flow.experimental.max', (['input'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input, dim=dim, keepdim=keepdim)\n', (1741, 1774), True, 'import oneflow as flow\n'), ((1800, 1854), 'oneflow.experimental.min', 'flow.experimental.min', (['input'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input, dim=dim, keepdim=keepdim)\n', (1821, 1854), True, 'import oneflow as flow\n'), ((2721, 2745), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (2742, 2745), True, 'import oneflow as flow\n'), ((2879, 2903), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (2900, 2903), True, 'import oneflow as flow\n'), ((5079, 5106), 'oneflow.experimental.square', 'flow.experimental.square', (['x'], {}), '(x)\n', (5103, 5106), True, 'import oneflow as flow\n'), ((5429, 5453), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (5450, 5453), True, 'import oneflow as flow\n'), ((5757, 5781), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (5778, 5781), True, 'import oneflow as flow\n'), ((2560, 2589), 'oneflow.experimental.argwhere', 'flow.experimental.argwhere', (['x'], {}), '(x)\n', (2586, 2589), True, 'import oneflow as flow\n'), ((3081, 3105), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (3102, 3105), True, 'import oneflow as flow\n')]
""" Modified from https://github.com/z-x-yang/GCT/blob/master/PyTorch/GCT.py """ import oneflow as flow import oneflow.nn as nn import oneflow.nn.functional as F class GCTModule(nn.Module): def __init__(self, channels, epsilon=1e-5, mode="l2", after_relu=False): super(GCTModule, self).__init__() self.alpha = nn.Parameter(flow.ones(1, channels, 1, 1)) self.gamma = nn.Parameter(flow.zeros(1, channels, 1, 1)) self.beta = nn.Parameter(flow.zeros(1, channels, 1, 1)) self.epsilon = epsilon self.mode = mode self.after_relu = after_relu def forward(self, x): assert self.mode in ["l1", "l2"], "Unknown mode type in GCTModule" if self.mode == "l2": embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon).pow( 0.5 ) * self.alpha norm = self.gamma / ( embedding.pow(2).mean(dim=1, keepdim=True) + self.epsilon ).pow(0.5) elif self.mode == "l1": if not self.after_relu: _x = flow.abs(x) else: _x = x embedding = _x.sum((2, 3), keepdim=True) * self.alpha norm = self.gamma / ( flow.abs(embedding).mean(dim=1, keepdim=True) + self.epsilon ) gate = 1.0 + flow.tanh(embedding * norm + self.beta) return x * gate
[ "oneflow.abs", "oneflow.zeros", "oneflow.tanh", "oneflow.ones" ]
[((347, 375), 'oneflow.ones', 'flow.ones', (['(1)', 'channels', '(1)', '(1)'], {}), '(1, channels, 1, 1)\n', (356, 375), True, 'import oneflow as flow\n'), ((411, 440), 'oneflow.zeros', 'flow.zeros', (['(1)', 'channels', '(1)', '(1)'], {}), '(1, channels, 1, 1)\n', (421, 440), True, 'import oneflow as flow\n'), ((475, 504), 'oneflow.zeros', 'flow.zeros', (['(1)', 'channels', '(1)', '(1)'], {}), '(1, channels, 1, 1)\n', (485, 504), True, 'import oneflow as flow\n'), ((1346, 1385), 'oneflow.tanh', 'flow.tanh', (['(embedding * norm + self.beta)'], {}), '(embedding * norm + self.beta)\n', (1355, 1385), True, 'import oneflow as flow\n'), ((1080, 1091), 'oneflow.abs', 'flow.abs', (['x'], {}), '(x)\n', (1088, 1091), True, 'import oneflow as flow\n'), ((1249, 1268), 'oneflow.abs', 'flow.abs', (['embedding'], {}), '(embedding)\n', (1257, 1268), True, 'import oneflow as flow\n')]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import codecs
import os
import numpy as np
from PIL import Image
import oneflow as flow
from of_model.resnet_model import resnet50

sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())


def init_resnet():
    """Initialize ResNet with pretrained weights."""
    model_load_dir = 'of_model/resnet_v15_of_best_model_val_top1_773/'
    assert os.path.isdir(model_load_dir)
    check_point = flow.train.CheckPoint()
    check_point.load(model_load_dir)


def load_image(image_path):
    """Load an image and preprocess it into a normalized NCHW float32 batch of size 1."""
    rgb_mean = [123.68, 116.779, 103.939]
    rgb_std = [58.393, 57.12, 57.375]
    im = Image.open(image_path).convert('RGB')
    im = im.resize((224, 224))
    im = np.array(im).astype('float32')
    im = (im - rgb_mean) / rgb_std
    im = np.transpose(im, (2, 0, 1))
    im = np.expand_dims(im, axis=0)
    return np.ascontiguousarray(im, 'float32')


@flow.global_function(flow.function_config())
def InferenceNet(images=flow.FixedTensorDef(
        (1, 3, 224, 224), dtype=flow.float)):
    """Run ResNet inference on a batch of images and return softmax predictions."""
    logits = resnet50(images, training=False)
    predictions = flow.nn.softmax(logits)
    return predictions


def resnet_inf(image_path):
    """Run the whole ResNet inference procedure on one image and return the top
    score together with its category id."""
    image = load_image(image_path.encode('utf-8'))
    predictions = InferenceNet(image).get()
    # Shift the argmax into the model's category-id range (labels start at offset 161).
    clsidx = predictions.ndarray().argmax() + 161
    return predictions.ndarray().max(), clsidx
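

# Editorial driver sketch ("test.jpg" is a hypothetical path): init_resnet() must
# be called once to load the checkpoint before the first resnet_inf() call.
if __name__ == "__main__":
    init_resnet()
    score, clsidx = resnet_inf("test.jpg")
    print("category id: {}, score: {:.4f}".format(clsidx, score))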
[ "oneflow.function_config", "oneflow.train.CheckPoint", "oneflow.FixedTensorDef", "oneflow.nn.softmax" ]
[((968, 993), 'codecs.getwriter', 'codecs.getwriter', (['"""utf-8"""'], {}), "('utf-8')\n", (984, 993), False, 'import codecs\n'), ((994, 1013), 'sys.stdout.detach', 'sys.stdout.detach', ([], {}), '()\n', (1011, 1013), False, 'import sys\n'), ((1169, 1198), 'os.path.isdir', 'os.path.isdir', (['model_load_dir'], {}), '(model_load_dir)\n', (1182, 1198), False, 'import os\n'), ((1217, 1240), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (1238, 1240), True, 'import oneflow as flow\n'), ((1590, 1617), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (1602, 1617), True, 'import numpy as np\n'), ((1627, 1653), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (1641, 1653), True, 'import numpy as np\n'), ((1665, 1700), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (1685, 1700), True, 'import numpy as np\n'), ((1773, 1828), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['(1, 3, 224, 224)'], {'dtype': 'flow.float'}), '((1, 3, 224, 224), dtype=flow.float)\n', (1792, 1828), True, 'import oneflow as flow\n'), ((1891, 1923), 'of_model.resnet_model.resnet50', 'resnet50', (['images'], {'training': '(False)'}), '(images, training=False)\n', (1899, 1923), False, 'from of_model.resnet_model import resnet50\n'), ((1942, 1965), 'oneflow.nn.softmax', 'flow.nn.softmax', (['logits'], {}), '(logits)\n', (1957, 1965), True, 'import oneflow as flow\n'), ((1725, 1747), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (1745, 1747), True, 'import oneflow as flow\n'), ((1437, 1459), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1447, 1459), False, 'from PIL import Image\n'), ((1515, 1527), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1523, 1527), True, 'import numpy as np\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import collections import math from typing import Callable, Dict, Iterator, List, Tuple, Union import oneflow as flow from oneflow.nn.optimizer.optimizer import Optimizer, ParamGroup from oneflow.nn.parameter import Parameter class Adam(Optimizer): """Implements Adam algorithm. It has been proposed in `Adam: A Method for Stochastic Optimization`_. The implementation of the L2 penalty follows changes proposed in `Decoupled Weight Decay Regularization`_. This algorithm can adjust the learning rate of each parameter dynamically according to the 1st-moment estimates and the 2nd-moment estimates of gradient. the equation of parameters updating is: .. math:: & V_t = \\beta_1*V_{t-1} + (1-\\beta_1)*grad & S_t = \\beta_2*S_{t-1} + (1-\\beta_2)*{grad} \\odot {grad} & \\hat{g} = learning\\_rate*\\frac{{V_t}}{\\sqrt{{S_t}}+\\epsilon} & param_{new} = param_{old} - \\hat{g} Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (bool, optional): whether to use the AMSGrad variant of this algorithm. (default: False) do_bias_correction (bool, optional): Whether do bias correction (default: True) .. _Adam\\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 For example: Example 1: .. code-block:: python # Assume net is a custom model. adam = flow.optim.Adam(net.parameters(), lr=1e-3) for epoch in range(epochs): # Read data, Compute the loss and so on. # ... loss.backward() adam.step() adam.zero_grad() Example 2: .. code-block:: python # Assume net is a custom model. adam = flow.optim.Adam( [ { "params": net.parameters(), "lr": learning_rate, "clip_grad_max_norm": 0.5, "clip_grad_norm_type": 2.0, } ], ) for epoch in range(epochs): # Read data, Compute the loss and so on. # ... loss.backward() adam.clip_grad() adam.step() adam.zero_grad() If you want to use clip_grad, you can refer this example. For more details of `clip_grad_max_norm` and `clip_grad_norm_type`, you can refer to :func:`oneflow.nn.utils.clip_grad_norm_`. 
""" def __init__( self, parameters: Union[Iterator[Parameter], List[Dict]], lr: float = 0.001, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0, amsgrad: bool = False, do_bias_correction: bool = True, ): assert lr >= 0.0, f"Invalid learning rate: {lr}" assert eps >= 0.0, f"Invalid epsilon value: {eps}" assert ( betas[0] >= 0.0 and betas[0] < 1.0 ), f"Invalid beta parameter at index 0: {betas[0]}" assert ( betas[1] >= 0.0 and betas[1] < 1.0 ), f"Invalid beta parameter at index 1: {betas[1]}" assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}" options = dict() options["lr"] = lr options["eps"] = eps options["betas"] = betas options["weight_decay"] = weight_decay options["amsgrad"] = amsgrad options["bias_correction1"] = 1.0 options["bias_correction2"] = 1.0 options["do_bias_correction"] = do_bias_correction super().__init__(parameters, options) for param_group in self.param_groups: for param in param_group.parameters: assert param.is_leaf, "parameters must be leaf tensor" self._state[param] = dict() self._op = ( flow.builtin_op("adam_update") .Input("model") .Input("model_diff") .Input("m") .Input("v") .Input("max_v") .Attr("l1", 0.0) .Attr("weight_decay", 0.0) .Build() ) def step(self, closure: Callable = None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ with flow.no_grad(): loss = None if closure is not None: loss = closure() for param_group in self.param_groups: if param_group["do_bias_correction"]: param_group["bias_correction1"] = 1.0 - math.pow( param_group["betas"][0], self._state["step"] + 1 ) param_group["bias_correction2"] = 1.0 - math.pow( param_group["betas"][1], self._state["step"] + 1 ) kwargs = { "learning_rate_val": param_group["lr"], "bias_correction1_val": param_group["bias_correction1"], "bias_correction2_val": param_group["bias_correction2"], "l2": param_group["weight_decay"], "beta1": param_group["betas"][0], "beta2": param_group["betas"][1], "epsilon": param_group["eps"], "do_bias_correction": param_group["do_bias_correction"], "amsgrad": param_group["amsgrad"], } for param in param_group.parameters: if param.grad is None: continue if "exp_avg" not in self._state[param]: self._state[param]["exp_avg"] = flow.zeros_like(param) if "exp_avg_sq" not in self._state[param]: self._state[param]["exp_avg_sq"] = flow.zeros_like(param) if "max_exp_avg_sq" not in self._state[param]: self._state[param]["max_exp_avg_sq"] = flow.zeros_like(param) m_tensor = self._state[param]["exp_avg"] v_tensor = self._state[param]["exp_avg_sq"] max_v_tensor = self._state[param]["max_exp_avg_sq"] self._op( param, param.grad, m_tensor, v_tensor, max_v_tensor, **kwargs, ) self._state["step"] += 1 return loss def _generate_conf_for_graph(self, train_conf, vars_conf): new_opt_confs = [] for param_group in self.param_groups: optimizer_conf = train_conf.mutable_optimizer_conf().Add() lr = ( param_group["initial_lr"] if "initial_lr" in param_group else param_group["lr"] ) l2 = param_group["weight_decay"] beta1 = param_group["betas"][0] beta2 = param_group["betas"][1] epsilon = param_group["eps"] do_bias_correction = param_group["do_bias_correction"] amsgrad = param_group["amsgrad"] optimizer_conf.set_base_learning_rate(lr) optimizer_conf.mutable_adam_conf().set_beta1(beta1) optimizer_conf.mutable_adam_conf().set_beta2(beta2) optimizer_conf.mutable_adam_conf().set_epsilon(epsilon) optimizer_conf.mutable_adam_conf().set_do_bias_correction( 
do_bias_correction ) optimizer_conf.mutable_adam_conf().set_amsgrad(amsgrad) self._generate_grad_clip_conf_for_optim_conf(param_group, optimizer_conf) for param in param_group.parameters: vars_conf[param].l2 = l2 if param.requires_grad: optimizer_conf.add_variable_op_names(vars_conf[param].name) new_opt_confs.append(optimizer_conf) return new_opt_confs
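

# Editorial sketch (plain NumPy, not the adam_update kernel used above) of one
# bias-corrected Adam step, matching the update equations in the class docstring;
# all names here are hypothetical helpers, not part of the optimizer API.
def adam_step_numpy(param, grad, m, v, step, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
    import numpy as np

    m = betas[0] * m + (1 - betas[0]) * grad         # V_t
    v = betas[1] * v + (1 - betas[1]) * grad * grad  # S_t
    bc1 = 1.0 - betas[0] ** step                     # bias_correction1
    bc2 = 1.0 - betas[1] ** step                     # bias_correction2
    param = param - lr * (m / bc1) / (np.sqrt(v / bc2) + eps)
    return param, m, v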
[ "oneflow.zeros_like", "oneflow.builtin_op", "oneflow.no_grad" ]
[((5523, 5537), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (5535, 5537), True, 'import oneflow as flow\n'), ((5797, 5855), 'math.pow', 'math.pow', (["param_group['betas'][0]", "(self._state['step'] + 1)"], {}), "(param_group['betas'][0], self._state['step'] + 1)\n", (5805, 5855), False, 'import math\n'), ((5962, 6020), 'math.pow', 'math.pow', (["param_group['betas'][1]", "(self._state['step'] + 1)"], {}), "(param_group['betas'][1], self._state['step'] + 1)\n", (5970, 6020), False, 'import math\n'), ((6918, 6940), 'oneflow.zeros_like', 'flow.zeros_like', (['param'], {}), '(param)\n', (6933, 6940), True, 'import oneflow as flow\n'), ((7063, 7085), 'oneflow.zeros_like', 'flow.zeros_like', (['param'], {}), '(param)\n', (7078, 7085), True, 'import oneflow as flow\n'), ((7216, 7238), 'oneflow.zeros_like', 'flow.zeros_like', (['param'], {}), '(param)\n', (7231, 7238), True, 'import oneflow as flow\n'), ((5004, 5034), 'oneflow.builtin_op', 'flow.builtin_op', (['"""adam_update"""'], {}), "('adam_update')\n", (5019, 5034), True, 'import oneflow as flow\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow import oneflow.typing as oft import numpy as np import os import unittest @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_1n1c(test_case): dcgan = DCGAN() dcgan.compare_with_tf(1) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_1n4c(test_case): dcgan = DCGAN() dcgan.compare_with_tf(4) class DCGAN: def __init__(self): self.lr = 1e-4 self.z_dim = 100 self.batch_size = 32 def compare_with_tf(self, gpu_num, result_dir="/dataset/gan_test/dcgan/"): flow.config.gpu_device_num(gpu_num) func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) func_config.default_logical_view(flow.scope.consistent_view()) @flow.global_function(type="train", function_config=func_config) def test_generator( z: oft.Numpy.Placeholder((self.batch_size, self.z_dim)), label1: oft.Numpy.Placeholder((self.batch_size, 1)), ): g_out = self.generator(z, trainable=True, const_init=True) g_logits = self.discriminator(g_out, trainable=False, const_init=True) g_loss = flow.nn.sigmoid_cross_entropy_with_logits( flow.ones_like(g_logits), g_logits, name="Gloss_sigmoid_cross_entropy_with_logits", ) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [self.lr]), momentum=0 ).minimize(g_loss) return g_loss @flow.global_function(type="train", function_config=func_config) def test_discriminator( z: oft.Numpy.Placeholder((self.batch_size, 100)), images: oft.Numpy.Placeholder((self.batch_size, 1, 28, 28)), label1: oft.Numpy.Placeholder((self.batch_size, 1)), label0: oft.Numpy.Placeholder((self.batch_size, 1)), ): g_out = self.generator(z, trainable=False, const_init=True) g_logits = self.discriminator(g_out, trainable=True, const_init=True) d_loss_fake = flow.nn.sigmoid_cross_entropy_with_logits( flow.zeros_like(g_logits), g_logits, name="Dloss_fake_sigmoid_cross_entropy_with_logits", ) d_logits = self.discriminator( images, trainable=True, reuse=True, const_init=True ) d_loss_real = flow.nn.sigmoid_cross_entropy_with_logits( flow.ones_like(d_logits), d_logits, name="Dloss_real_sigmoid_cross_entropy_with_logits", ) d_loss = d_loss_fake + d_loss_real flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [self.lr]), momentum=0 ).minimize(d_loss) return d_loss check_point = flow.train.CheckPoint() check_point.init() z = np.load(os.path.join(result_dir, "z.npy")) imgs = np.load(os.path.join(result_dir, "img.npy")).transpose(0, 3, 1, 2) label1 = np.ones((self.batch_size, 1)).astype(np.float32) label0 = np.zeros((self.batch_size, 1)).astype(np.float32) g_loss = test_generator(z, label1).get() d_loss = test_discriminator(z, imgs, label1, label0).get() tf_g_loss = np.load(os.path.join(result_dir, "g_loss.npy")) tf_d_loss = np.load(os.path.join(result_dir, "d_loss.npy")) if gpu_num == 1: # multi-gpu result can not pass assert np.allclose( g_loss.numpy(), tf_g_loss, rtol=1e-2, atol=1e-1 ), "{}-{}".format(g_loss.ndarray().mean(), 
tf_g_loss.mean()) assert np.allclose( d_loss.numpy(), tf_d_loss, rtol=1e-2, atol=1e-1 ), "{}-{}".format(d_loss.ndarray().mean(), tf_d_loss.mean()) def generator(self, z, const_init=False, trainable=True): # (n, 256, 7, 7) h0 = layers.dense( z, 7 * 7 * 256, name="g_fc1", const_init=const_init, trainable=trainable ) h0 = layers.batchnorm(h0, axis=1, name="g_bn1") h0 = flow.nn.leaky_relu(h0, 0.3) h0 = flow.reshape(h0, (-1, 256, 7, 7)) # (n, 128, 7, 7) h1 = layers.deconv2d( h0, 128, 5, strides=1, name="g_deconv1", const_init=const_init, trainable=trainable, ) h1 = layers.batchnorm(h1, name="g_bn2") h1 = flow.nn.leaky_relu(h1, 0.3) # (n, 64, 14, 14) h2 = layers.deconv2d( h1, 64, 5, strides=2, name="g_deconv2", const_init=const_init, trainable=trainable, ) h2 = layers.batchnorm(h2, name="g_bn3") h2 = flow.nn.leaky_relu(h2, 0.3) # (n, 1, 28, 28) out = layers.deconv2d( h2, 1, 5, strides=2, name="g_deconv3", const_init=const_init, trainable=trainable, ) out = flow.math.tanh(out) return out def discriminator(self, img, const_init=False, trainable=True, reuse=False): # (n, 1, 28, 28) h0 = layers.conv2d( img, 64, 5, name="d_conv1", const_init=const_init, trainable=trainable, reuse=reuse, ) h0 = flow.nn.leaky_relu(h0, 0.3) # h0 = flow.nn.dropout(h0, rate=0.3) # (n, 64, 14, 14) h1 = layers.conv2d( h0, 128, 5, name="d_conv2", const_init=const_init, trainable=trainable, reuse=reuse, ) h1 = flow.nn.leaky_relu(h1, 0.3) # h1 = flow.nn.dropout(h1, rate=0.3) # (n, 128 * 7 * 7) out = flow.reshape(h1, (self.batch_size, -1)) # (n, 1) out = layers.dense( out, 1, name="d_fc", const_init=const_init, trainable=trainable, reuse=reuse ) return out class layers: @staticmethod def deconv2d( input, filters, size, name, strides=2, trainable=True, reuse=False, const_init=False, use_bias=False, ): name_ = name if reuse == False else name + "_reuse" # weight : [in_channels, out_channels, height, width] weight_shape = (input.shape[1], filters, size, size) output_shape = ( input.shape[0], filters, input.shape[2] * strides, input.shape[3] * strides, ) weight = flow.get_variable( name + "-weight", shape=weight_shape, dtype=input.dtype, initializer=flow.random_normal_initializer(stddev=0.02) if not const_init else flow.constant_initializer(0.002), trainable=trainable, reuse=reuse, ) output = flow.nn.conv2d_transpose( input, weight, strides=[strides, strides], output_shape=output_shape, padding="SAME", data_format="NCHW", name=name_, ) if use_bias: bias = flow.get_variable( name + "-bias", shape=(filters,), dtype=input.dtype, initializer=flow.constant_initializer(0.0), trainable=trainable, reuse=reuse, ) output = flow.nn.bias_add(output, bias, "NCHW") return output @staticmethod def conv2d( input, filters, size, name, strides=2, padding="same", trainable=True, reuse=False, const_init=False, use_bias=True, ): name_ = name if reuse == False else name + "_reuse" # (output_dim, k_h, k_w, input.shape[3]) if NHWC weight_shape = (filters, input.shape[1], size, size) weight = flow.get_variable( name + "-weight", shape=weight_shape, dtype=input.dtype, initializer=flow.random_normal_initializer(stddev=0.02) if not const_init else flow.constant_initializer(0.002), trainable=trainable, reuse=reuse, ) output = flow.nn.compat_conv2d( input, weight, strides=[strides, strides], padding=padding, data_format="NCHW", name=name_, ) if use_bias: bias = flow.get_variable( name + "-bias", shape=(filters,), dtype=input.dtype, initializer=flow.constant_initializer(0.0), trainable=trainable, reuse=reuse, ) output = flow.nn.bias_add(output, bias, "NCHW") 
return output @staticmethod def dense( input, units, name, use_bias=False, trainable=True, reuse=False, const_init=False, ): name_ = name if reuse == False else name + "_reuse" in_shape = input.shape in_num_axes = len(in_shape) assert in_num_axes >= 2 inputs = flow.reshape(input, (-1, in_shape[-1])) if in_num_axes > 2 else input weight = flow.get_variable( name="{}-weight".format(name), shape=(units, inputs.shape[1]), dtype=inputs.dtype, initializer=flow.random_normal_initializer(stddev=0.02) if not const_init else flow.constant_initializer(0.002), trainable=trainable, model_name="weight", reuse=reuse, ) out = flow.matmul(a=inputs, b=weight, transpose_b=True, name=name_ + "matmul",) if use_bias: bias = flow.get_variable( name="{}-bias".format(name), shape=(units,), dtype=inputs.dtype, initializer=flow.random_normal_initializer() if not const_init else flow.constant_initializer(0.002), trainable=trainable, model_name="bias", reuse=reuse, ) out = flow.nn.bias_add(out, bias, name=name_ + "_bias_add") out = flow.reshape(out, in_shape[:-1] + (units,)) if in_num_axes > 2 else out return out @staticmethod def batchnorm(input, name, axis=1, reuse=False): name_ = name if reuse == False else name + "_reuse" return flow.layers.batch_normalization(input, axis=axis, name=name_)
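

# Editorial NumPy reference (a sketch, not the op used above) for the
# sigmoid_cross_entropy_with_logits losses built in compare_with_tf; the
# numerically stable form is max(x, 0) - x*z + log(1 + exp(-|x|)).
def sigmoid_cross_entropy_with_logits_numpy(logits, labels):
    import numpy as np

    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))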
[ "oneflow.FunctionConfig", "oneflow.scope.consistent_view", "oneflow.zeros_like", "oneflow.optimizer.PiecewiseConstantScheduler", "oneflow.global_function", "oneflow.typing.Numpy.Placeholder", "oneflow.nn.leaky_relu", "oneflow.nn.compat_conv2d", "oneflow.nn.bias_add", "oneflow.reshape", "oneflow.nn.conv2d_transpose", "oneflow.matmul", "oneflow.constant_initializer", "oneflow.train.CheckPoint", "oneflow.layers.batch_normalization", "oneflow.config.gpu_device_num", "oneflow.math.tanh", "oneflow.ones_like", "oneflow.random_normal_initializer" ]
[((706, 740), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (715, 740), False, 'import os\n'), ((859, 893), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (868, 893), False, 'import os\n'), ((1197, 1232), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['gpu_num'], {}), '(gpu_num)\n', (1223, 1232), True, 'import oneflow as flow\n'), ((1255, 1276), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1274, 1276), True, 'import oneflow as flow\n'), ((1408, 1471), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1428, 1471), True, 'import oneflow as flow\n'), ((2194, 2257), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2214, 2257), True, 'import oneflow as flow\n'), ((3532, 3555), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (3553, 3555), True, 'import oneflow as flow\n'), ((4782, 4809), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['h0', '(0.3)'], {}), '(h0, 0.3)\n', (4800, 4809), True, 'import oneflow as flow\n'), ((4823, 4856), 'oneflow.reshape', 'flow.reshape', (['h0', '(-1, 256, 7, 7)'], {}), '(h0, (-1, 256, 7, 7))\n', (4835, 4856), True, 'import oneflow as flow\n'), ((5152, 5179), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['h1', '(0.3)'], {}), '(h1, 0.3)\n', (5170, 5179), True, 'import oneflow as flow\n'), ((5475, 5502), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['h2', '(0.3)'], {}), '(h2, 0.3)\n', (5493, 5502), True, 'import oneflow as flow\n'), ((5750, 5769), 'oneflow.math.tanh', 'flow.math.tanh', (['out'], {}), '(out)\n', (5764, 5769), True, 'import oneflow as flow\n'), ((6116, 6143), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['h0', '(0.3)'], {}), '(h0, 0.3)\n', (6134, 6143), True, 'import oneflow as flow\n'), ((6435, 6462), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['h1', '(0.3)'], {}), '(h1, 0.3)\n', (6453, 6462), True, 'import oneflow as flow\n'), ((6549, 6588), 'oneflow.reshape', 'flow.reshape', (['h1', '(self.batch_size, -1)'], {}), '(h1, (self.batch_size, -1))\n', (6561, 6588), True, 'import oneflow as flow\n'), ((7693, 7839), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['input', 'weight'], {'strides': '[strides, strides]', 'output_shape': 'output_shape', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=[strides, strides],\n output_shape=output_shape, padding='SAME', data_format='NCHW', name=name_)\n", (7717, 7839), True, 'import oneflow as flow\n'), ((9097, 9215), 'oneflow.nn.compat_conv2d', 'flow.nn.compat_conv2d', (['input', 'weight'], {'strides': '[strides, strides]', 'padding': 'padding', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=[strides, strides], padding=\n padding, data_format='NCHW', name=name_)\n", (9118, 9215), True, 'import oneflow as flow\n'), ((10527, 10599), 'oneflow.matmul', 'flow.matmul', ([], {'a': 'inputs', 'b': 'weight', 'transpose_b': '(True)', 'name': "(name_ + 'matmul')"}), "(a=inputs, b=weight, transpose_b=True, name=name_ + 'matmul')\n", (10538, 10599), True, 'import oneflow as flow\n'), ((11364, 11425), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', (['input'], {'axis': 'axis', 'name': 'name_'}), '(input, axis=axis, 
name=name_)\n', (11395, 11425), True, 'import oneflow as flow\n'), ((1368, 1396), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1394, 1396), True, 'import oneflow as flow\n'), ((3604, 3637), 'os.path.join', 'os.path.join', (['result_dir', '"""z.npy"""'], {}), "(result_dir, 'z.npy')\n", (3616, 3637), False, 'import os\n'), ((3998, 4036), 'os.path.join', 'os.path.join', (['result_dir', '"""g_loss.npy"""'], {}), "(result_dir, 'g_loss.npy')\n", (4010, 4036), False, 'import os\n'), ((4066, 4104), 'os.path.join', 'os.path.join', (['result_dir', '"""d_loss.npy"""'], {}), "(result_dir, 'd_loss.npy')\n", (4078, 4104), False, 'import os\n'), ((8254, 8292), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], {}), "(output, bias, 'NCHW')\n", (8270, 8292), True, 'import oneflow as flow\n'), ((9617, 9655), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], {}), "(output, bias, 'NCHW')\n", (9633, 9655), True, 'import oneflow as flow\n'), ((10036, 10075), 'oneflow.reshape', 'flow.reshape', (['input', '(-1, in_shape[-1])'], {}), '(input, (-1, in_shape[-1]))\n', (10048, 10075), True, 'import oneflow as flow\n'), ((11057, 11110), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['out', 'bias'], {'name': "(name_ + '_bias_add')"}), "(out, bias, name=name_ + '_bias_add')\n", (11073, 11110), True, 'import oneflow as flow\n'), ((11126, 11169), 'oneflow.reshape', 'flow.reshape', (['out', '(in_shape[:-1] + (units,))'], {}), '(out, in_shape[:-1] + (units,))\n', (11138, 11169), True, 'import oneflow as flow\n'), ((1515, 1567), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(self.batch_size, self.z_dim)'], {}), '((self.batch_size, self.z_dim))\n', (1536, 1567), True, 'import oneflow.typing as oft\n'), ((1589, 1632), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (1610, 1632), True, 'import oneflow.typing as oft\n'), ((1879, 1903), 'oneflow.ones_like', 'flow.ones_like', (['g_logits'], {}), '(g_logits)\n', (1893, 1903), True, 'import oneflow as flow\n'), ((2305, 2350), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(self.batch_size, 100)'], {}), '((self.batch_size, 100))\n', (2326, 2350), True, 'import oneflow.typing as oft\n'), ((2372, 2423), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(self.batch_size, 1, 28, 28)'], {}), '((self.batch_size, 1, 28, 28))\n', (2393, 2423), True, 'import oneflow.typing as oft\n'), ((2445, 2488), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (2466, 2488), True, 'import oneflow.typing as oft\n'), ((2510, 2553), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (2531, 2553), True, 'import oneflow.typing as oft\n'), ((2805, 2830), 'oneflow.zeros_like', 'flow.zeros_like', (['g_logits'], {}), '(g_logits)\n', (2820, 2830), True, 'import oneflow as flow\n'), ((3152, 3176), 'oneflow.ones_like', 'flow.ones_like', (['d_logits'], {}), '(d_logits)\n', (3166, 3176), True, 'import oneflow as flow\n'), ((3738, 3767), 'numpy.ones', 'np.ones', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (3745, 3767), True, 'import numpy as np\n'), ((3804, 3834), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (3812, 3834), True, 'import numpy as np\n'), ((3662, 3697), 'os.path.join', 'os.path.join', (['result_dir', 
'"""img.npy"""'], {}), "(result_dir, 'img.npy')\n", (3674, 3697), False, 'import os\n'), ((7482, 7525), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (7512, 7525), True, 'import oneflow as flow\n'), ((7573, 7605), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.002)'], {}), '(0.002)\n', (7598, 7605), True, 'import oneflow as flow\n'), ((8120, 8150), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (8145, 8150), True, 'import oneflow as flow\n'), ((8886, 8929), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (8916, 8929), True, 'import oneflow as flow\n'), ((8977, 9009), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.002)'], {}), '(0.002)\n', (9002, 9009), True, 'import oneflow as flow\n'), ((9483, 9513), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (9508, 9513), True, 'import oneflow as flow\n'), ((10286, 10329), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (10316, 10329), True, 'import oneflow as flow\n'), ((10377, 10409), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.002)'], {}), '(0.002)\n', (10402, 10409), True, 'import oneflow as flow\n'), ((2058, 2114), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[self.lr]'], {}), '([], [self.lr])\n', (2099, 2114), True, 'import oneflow as flow\n'), ((3382, 3438), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[self.lr]'], {}), '([], [self.lr])\n', (3423, 3438), True, 'import oneflow as flow\n'), ((10802, 10834), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {}), '()\n', (10832, 10834), True, 'import oneflow as flow\n'), ((10890, 10922), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.002)'], {}), '(0.002)\n', (10915, 10922), True, 'import oneflow as flow\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np import oneflow as flow import oneflow.unittest @flow.unittest.skip_unless_1n1d() class TestMockModule(flow.unittest.TestCase): def test_mock_device(test_case): device = flow.device("mock") test_case.assertEqual(device.type, "mock") def test_mock_placement(test_case): placement = flow.placement("mock", [0]) test_case.assertEqual(placement.type, "mock") if __name__ == "__main__": unittest.main()
[ "oneflow.unittest.skip_unless_1n1d", "oneflow.placement", "oneflow.device" ]
[((714, 746), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (744, 746), True, 'import oneflow as flow\n'), ((1094, 1109), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1107, 1109), False, 'import unittest\n'), ((847, 866), 'oneflow.device', 'flow.device', (['"""mock"""'], {}), "('mock')\n", (858, 866), True, 'import oneflow as flow\n'), ((979, 1006), 'oneflow.placement', 'flow.placement', (['"""mock"""', '[0]'], {}), "('mock', [0])\n", (993, 1006), True, 'import oneflow as flow\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np import oneflow.experimental as flow from test_util import GenArgList g_test_samples = [ { "input": np.array( [ [-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324], [-0.26332688, 0.27541, 0.30080616, 0.09914763, 0.53522176], [0.7332028, 0.38375184, -0.2831992, -0.9833142, 0.387824], ] ), "target": np.array([3, 3, 4], dtype=np.int32), "ignore_index": 4, "out": np.array([1.1380, 1.7332, 0.0], dtype=np.float32), "out_sum": np.array([2.8711782], dtype=np.float32), "out_mean": np.array([1.4355891], dtype=np.float32), }, { "input": np.array( [[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]] ), "target": np.array([[[1, 0], [0, 1]]], dtype=np.int32), "ignore_index": 1, "out": np.array([[[0.0, 0.6832], [0.8544, 0.0]]], dtype=np.float32), "out_sum": np.array([1.5375525], dtype=np.float32), "out_mean": np.array([0.76877624], dtype=np.float32), }, { "input": np.array( [ [-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324], [-0.26332688, 0.27541, 0.30080616, 0.09914763, 0.53522176], [0.7332028, 0.38375184, -0.2831992, -0.9833142, 0.387824], ] ), "target": np.array([3, 3, 4], dtype=np.int32), "out": np.array([1.1380, 1.7332, 1.4287], dtype=np.float32), "out_sum": np.array([4.2999], dtype=np.float32), "out_mean": np.array([1.4333], dtype=np.float32), }, { "input": np.array( [[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]] ), "target": np.array([[[1, 0], [0, 1]]], dtype=np.int32), "out": np.array([[[0.6882, 0.6832], [0.8544, 1.8006]]], dtype=np.float32), "out_sum": np.array([4.0263], dtype=np.float32), "out_mean": np.array([1.0066], dtype=np.float32), }, { "input": np.array( [ [[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]], [[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]], ] ), "target": np.array([[[1, 0], [0, 1]], [[1, 0], [0, 1]]], dtype=np.int32), "out": np.array( [ [[0.6882, 0.6832], [0.8544, 1.8006]], [[0.6882, 0.6832], [0.8544, 1.8006]], ], dtype=np.float32, ), "out_sum": np.array([8.0526], dtype=np.float32), "out_mean": np.array([1.0066], dtype=np.float32), }, { "input": np.array([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]), "target": np.array([[1, 0, 0, 1]], dtype=np.int32), "out": np.array([[0.6882, 0.6832, 0.8544, 1.8006]], dtype=np.float32,), "out_sum": np.array([4.0263], dtype=np.float32), "out_mean": np.array([1.0066], dtype=np.float32), }, ] @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestCrossEntropyLossModule(flow.unittest.TestCase): def test_CrossEntropyLoss(test_case): global g_test_samples for sample in g_test_samples: ignore_index = sample.get("ignore_index", None) input = flow.Tensor(sample["input"], dtype=flow.float32) target = flow.Tensor(sample["target"], dtype=flow.int32) loss = flow.nn.CrossEntropyLoss(reduction=None, ignore_index=ignore_index) of_out = loss(input, target) assert 
np.allclose(of_out.numpy(), sample["out"], 1e-4, 1e-4) loss_sum = flow.nn.CrossEntropyLoss( reduction="sum", ignore_index=ignore_index ) of_out_sum = loss_sum(input, target) assert np.allclose(of_out_sum.numpy(), sample["out_sum"], 1e-4, 1e-4) loss_mean = flow.nn.CrossEntropyLoss( reduction="mean", ignore_index=ignore_index ) of_out_mean = loss_mean(input, target) assert np.allclose(of_out_mean.numpy(), sample["out_mean"], 1e-4, 1e-4) if __name__ == "__main__": unittest.main()
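

# Editorial NumPy reference (a sketch for the 2-D (N, C) samples above; the 4-D
# samples put the class axis at dim 1): each value in "out" is
# -log_softmax(input)[target], with ignored positions zeroed out.
def cross_entropy_numpy(input, target, ignore_index=None):
    x = input - input.max(axis=-1, keepdims=True)  # stabilize the softmax
    logp = x - np.log(np.exp(x).sum(axis=-1, keepdims=True))
    out = -np.take_along_axis(logp, target[..., None], axis=-1)[..., 0]
    if ignore_index is not None:
        out = np.where(target == ignore_index, 0.0, out)
    return out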
[ "oneflow.experimental.nn.CrossEntropyLoss", "oneflow.experimental.unittest.env.eager_execution_enabled", "oneflow.experimental.Tensor" ]
[((4904, 4919), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4917, 4919), False, 'import unittest\n'), ((775, 975), 'numpy.array', 'np.array', (['[[-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324], [-0.26332688,\n 0.27541, 0.30080616, 0.09914763, 0.53522176], [0.7332028, 0.38375184, -\n 0.2831992, -0.9833142, 0.387824]]'], {}), '([[-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324], [-\n 0.26332688, 0.27541, 0.30080616, 0.09914763, 0.53522176], [0.7332028, \n 0.38375184, -0.2831992, -0.9833142, 0.387824]])\n', (783, 975), True, 'import numpy as np\n'), ((1070, 1105), 'numpy.array', 'np.array', (['[3, 3, 4]'], {'dtype': 'np.int32'}), '([3, 3, 4], dtype=np.int32)\n', (1078, 1105), True, 'import numpy as np\n'), ((1149, 1197), 'numpy.array', 'np.array', (['[1.138, 1.7332, 0.0]'], {'dtype': 'np.float32'}), '([1.138, 1.7332, 0.0], dtype=np.float32)\n', (1157, 1197), True, 'import numpy as np\n'), ((1219, 1258), 'numpy.array', 'np.array', (['[2.8711782]'], {'dtype': 'np.float32'}), '([2.8711782], dtype=np.float32)\n', (1227, 1258), True, 'import numpy as np\n'), ((1280, 1319), 'numpy.array', 'np.array', (['[1.4355891]'], {'dtype': 'np.float32'}), '([1.4355891], dtype=np.float32)\n', (1288, 1319), True, 'import numpy as np\n'), ((1351, 1424), 'numpy.array', 'np.array', (['[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]'], {}), '([[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]])\n', (1359, 1424), True, 'import numpy as np\n'), ((1466, 1510), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]]]'], {'dtype': 'np.int32'}), '([[[1, 0], [0, 1]]], dtype=np.int32)\n', (1474, 1510), True, 'import numpy as np\n'), ((1554, 1614), 'numpy.array', 'np.array', (['[[[0.0, 0.6832], [0.8544, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0.0, 0.6832], [0.8544, 0.0]]], dtype=np.float32)\n', (1562, 1614), True, 'import numpy as np\n'), ((1635, 1674), 'numpy.array', 'np.array', (['[1.5375525]'], {'dtype': 'np.float32'}), '([1.5375525], dtype=np.float32)\n', (1643, 1674), True, 'import numpy as np\n'), ((1696, 1736), 'numpy.array', 'np.array', (['[0.76877624]'], {'dtype': 'np.float32'}), '([0.76877624], dtype=np.float32)\n', (1704, 1736), True, 'import numpy as np\n'), ((1768, 1968), 'numpy.array', 'np.array', (['[[-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324], [-0.26332688,\n 0.27541, 0.30080616, 0.09914763, 0.53522176], [0.7332028, 0.38375184, -\n 0.2831992, -0.9833142, 0.387824]]'], {}), '([[-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324], [-\n 0.26332688, 0.27541, 0.30080616, 0.09914763, 0.53522176], [0.7332028, \n 0.38375184, -0.2831992, -0.9833142, 0.387824]])\n', (1776, 1968), True, 'import numpy as np\n'), ((2063, 2098), 'numpy.array', 'np.array', (['[3, 3, 4]'], {'dtype': 'np.int32'}), '([3, 3, 4], dtype=np.int32)\n', (2071, 2098), True, 'import numpy as np\n'), ((2115, 2166), 'numpy.array', 'np.array', (['[1.138, 1.7332, 1.4287]'], {'dtype': 'np.float32'}), '([1.138, 1.7332, 1.4287], dtype=np.float32)\n', (2123, 2166), True, 'import numpy as np\n'), ((2188, 2224), 'numpy.array', 'np.array', (['[4.2999]'], {'dtype': 'np.float32'}), '([4.2999], dtype=np.float32)\n', (2196, 2224), True, 'import numpy as np\n'), ((2246, 2282), 'numpy.array', 'np.array', (['[1.4333]'], {'dtype': 'np.float32'}), '([1.4333], dtype=np.float32)\n', (2254, 2282), True, 'import numpy as np\n'), ((2314, 2387), 'numpy.array', 'np.array', (['[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]'], {}), '([[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]])\n', (2322, 2387), True, 'import numpy as np\n'), ((2429, 2473), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]]]'], {'dtype': 'np.int32'}), '([[[1, 0], [0, 1]]], dtype=np.int32)\n', (2437, 2473), True, 'import numpy as np\n'), ((2490, 2556), 'numpy.array', 'np.array', (['[[[0.6882, 0.6832], [0.8544, 1.8006]]]'], {'dtype': 'np.float32'}), '([[[0.6882, 0.6832], [0.8544, 1.8006]]], dtype=np.float32)\n', (2498, 2556), True, 'import numpy as np\n'), ((2577, 2613), 'numpy.array', 'np.array', (['[4.0263]'], {'dtype': 'np.float32'}), '([4.0263], dtype=np.float32)\n', (2585, 2613), True, 'import numpy as np\n'), ((2635, 2671), 'numpy.array', 'np.array', (['[1.0066]'], {'dtype': 'np.float32'}), '([1.0066], dtype=np.float32)\n', (2643, 2671), True, 'import numpy as np\n'), ((2703, 2844), 'numpy.array', 'np.array', (['[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]], [[[0.12, \n 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]'], {}), '([[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]], [[\n [0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]])\n', (2711, 2844), True, 'import numpy as np\n'), ((2928, 2990), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]], [[1, 0], [0, 1]]]'], {'dtype': 'np.int32'}), '([[[1, 0], [0, 1]], [[1, 0], [0, 1]]], dtype=np.int32)\n', (2936, 2990), True, 'import numpy as np\n'), ((3007, 3115), 'numpy.array', 'np.array', (['[[[0.6882, 0.6832], [0.8544, 1.8006]], [[0.6882, 0.6832], [0.8544, 1.8006]]]'], {'dtype': 'np.float32'}), '([[[0.6882, 0.6832], [0.8544, 1.8006]], [[0.6882, 0.6832], [0.8544,\n 1.8006]]], dtype=np.float32)\n', (3015, 3115), True, 'import numpy as np\n'), ((3214, 3250), 'numpy.array', 'np.array', (['[8.0526]'], {'dtype': 'np.float32'}), '([8.0526], dtype=np.float32)\n', (3222, 3250), True, 'import numpy as np\n'), ((3272, 3308), 'numpy.array', 'np.array', (['[1.0066]'], {'dtype': 'np.float32'}), '([1.0066], dtype=np.float32)\n', (3280, 3308), True, 'import numpy as np\n'), ((3340, 3405), 'numpy.array', 'np.array', (['[[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]'], {}), '([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]])\n', (3348, 3405), True, 'import numpy as np\n'), ((3425, 3465), 'numpy.array', 'np.array', (['[[1, 0, 0, 1]]'], {'dtype': 'np.int32'}), '([[1, 0, 0, 1]], dtype=np.int32)\n', (3433, 3465), True, 'import numpy as np\n'), ((3482, 3544), 'numpy.array', 'np.array', (['[[0.6882, 0.6832, 0.8544, 1.8006]]'], {'dtype': 'np.float32'}), '([[0.6882, 0.6832, 0.8544, 1.8006]], dtype=np.float32)\n', (3490, 3544), True, 'import numpy as np\n'), ((3566, 3602), 'numpy.array', 'np.array', (['[4.0263]'], {'dtype': 'np.float32'}), '([4.0263], dtype=np.float32)\n', (3574, 3602), True, 'import numpy as np\n'), ((3624, 3660), 'numpy.array', 'np.array', (['[1.0066]'], {'dtype': 'np.float32'}), '([1.0066], dtype=np.float32)\n', (3632, 3660), True, 'import numpy as np\n'), ((3699, 3742), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3740, 3742), True, 'import oneflow.experimental as flow\n'), ((4036, 4084), 'oneflow.experimental.Tensor', 'flow.Tensor', (["sample['input']"], {'dtype': 'flow.float32'}), "(sample['input'], dtype=flow.float32)\n", (4047, 4084), True, 'import oneflow.experimental as flow\n'), ((4106, 4153), 'oneflow.experimental.Tensor', 'flow.Tensor', (["sample['target']"], {'dtype': 'flow.int32'}), "(sample['target'], dtype=flow.int32)\n", (4117, 4153), True, 'import oneflow.experimental as flow\n'), ((4174, 4241), 'oneflow.experimental.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {'reduction': 'None', 'ignore_index': 'ignore_index'}), '(reduction=None, ignore_index=ignore_index)\n', (4198, 4241), True, 'import oneflow.experimental as flow\n'), ((4381, 4449), 'oneflow.experimental.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""', 'ignore_index': 'ignore_index'}), "(reduction='sum', ignore_index=ignore_index)\n", (4405, 4449), True, 'import oneflow.experimental as flow\n'), ((4636, 4705), 'oneflow.experimental.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""', 'ignore_index': 'ignore_index'}), "(reduction='mean', ignore_index=ignore_index)\n", (4660, 4705), True, 'import oneflow.experimental as flow\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import unittest import oneflow as flow import oneflow.unittest class MyModule(flow.nn.Module): def __init__(self, placement=None, sbp=None): super().__init__() w = flow.randn(10, 10, placement=placement, sbp=sbp) self.weight = flow.nn.Parameter(w) def forward(self, input): return flow._C.gather(self.weight, input, 0) class MyGraph(flow.nn.Graph): def __init__(self, module): super().__init__() self.m = module sgd = flow.optim.SGD(module.parameters(), lr=1e-3) self.add_optimizer(sgd, is_sparse=True) def build(self, input): result = self.m(input) result.mean().backward() def _rand_input(placement=None, sbp=None): generator = flow.Generator() generator.manual_seed(0) return flow.randint(0, 10, (8,), generator=generator, placement=placement, sbp=sbp) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") @flow.unittest.skip_unless_1n1d() class GraphSparseOptimizerTest(oneflow.unittest.TestCase): def test(test_case): PLC = flow.placement("cuda", ranks=[0]) SBP = flow.sbp.broadcast m = MyModule(PLC, SBP) graph = MyGraph(m) graph._compile(_rand_input(PLC, SBP)) sparse_optimizer_found = False for op in graph._full_graph_proto.net.op: # print("==>", op.name) if op.HasField("user_conf"): # print(" -->", op.user_conf.op_type_name) if op.user_conf.op_type_name == "indexed_slices_sgd_update": sparse_optimizer_found = True break test_case.assertTrue(sparse_optimizer_found) if __name__ == "__main__": unittest.main()
[ "oneflow.nn.Parameter", "oneflow.randint", "oneflow.Generator", "oneflow.randn", "oneflow._C.gather", "oneflow.unittest.skip_unless_1n1d", "oneflow.placement" ]
[((1552, 1584), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1582, 1584), True, 'import oneflow as flow\n'), ((1339, 1355), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (1353, 1355), True, 'import oneflow as flow\n'), ((1396, 1472), 'oneflow.randint', 'flow.randint', (['(0)', '(10)', '(8,)'], {'generator': 'generator', 'placement': 'placement', 'sbp': 'sbp'}), '(0, 10, (8,), generator=generator, placement=placement, sbp=sbp)\n', (1408, 1472), True, 'import oneflow as flow\n'), ((1492, 1526), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1501, 1526), False, 'import os\n'), ((2321, 2336), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2334, 2336), False, 'import unittest\n'), ((787, 835), 'oneflow.randn', 'flow.randn', (['(10)', '(10)'], {'placement': 'placement', 'sbp': 'sbp'}), '(10, 10, placement=placement, sbp=sbp)\n', (797, 835), True, 'import oneflow as flow\n'), ((858, 878), 'oneflow.nn.Parameter', 'flow.nn.Parameter', (['w'], {}), '(w)\n', (875, 878), True, 'import oneflow as flow\n'), ((925, 962), 'oneflow._C.gather', 'flow._C.gather', (['self.weight', 'input', '(0)'], {}), '(self.weight, input, 0)\n', (939, 962), True, 'import oneflow as flow\n'), ((1683, 1716), 'oneflow.placement', 'flow.placement', (['"""cuda"""'], {'ranks': '[0]'}), "('cuda', ranks=[0])\n", (1697, 1716), True, 'import oneflow as flow\n')]
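The test above finds the sparse optimizer by walking the compiled graph proto and matching a user-op type name. A minimal sketch of that scan as a reusable helper (the helper name is hypothetical; `_full_graph_proto` is the same internal attribute the test already relies on):

def graph_has_user_op(graph, op_type_name):
    # Walk every op in the compiled flow.nn.Graph and match user-op type names.
    for op in graph._full_graph_proto.net.op:
        if op.HasField("user_conf") and op.user_conf.op_type_name == op_type_name:
            return True
    return False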
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow from oneflow.framework.docstr.utils import add_docstr add_docstr( oneflow.chunk, """Splits a tensor into a specific number of chunks. Each chunk is a view of the input tensor. Last chunk will be bigger if the tensor size along the given dimension dim is not divisible by chunks. Args: input (oneflow.Tensor): The tensor to split. chunks (int): Number of chunks to return. dim (int): Dimension along which to split the tensor. Returns: List of Tensors. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> arr = np.random.randn(5, 3, 6, 9).astype(np.float32) >>> input = flow.tensor(arr) >>> output = [] >>> chunks = 3 >>> output = flow.chunk(input, chunks=chunks, dim=2) >>> out_shape = [] >>> for i in range(0, chunks): ... out_shape.append(output[i].numpy().shape) >>> out_shape [(5, 3, 2, 9), (5, 3, 2, 9), (5, 3, 2, 9)] """, )
[ "oneflow.framework.docstr.utils.add_docstr" ]
[((660, 1657), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.chunk', '"""Splits a tensor into a specific number of chunks. Each chunk is a view of the input tensor. Last chunk will be bigger if the tensor size along the given dimension dim is not divisible by chunks.\n\n Args:\n input (oneflow.Tensor): The tensor to split.\n chunks (int): Number of chunks to return.\n dim (int): Dimension along which to split the tensor.\n\n Returns:\n List of Tensors.\n\n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n >>> import numpy as np\n \n >>> arr = np.random.randn(5, 3, 6, 9).astype(np.float32)\n >>> input = flow.tensor(arr)\n >>> output = []\n >>> chunks = 3\n >>> output = flow.chunk(input, chunks=chunks, dim=2)\n >>> out_shape = []\n >>> for i in range(0, chunks):\n ... out_shape.append(output[i].numpy().shape)\n >>> out_shape\n [(5, 3, 2, 9), (5, 3, 2, 9), (5, 3, 2, 9)]\n\n """'], {}), '(oneflow.chunk,\n """Splits a tensor into a specific number of chunks. Each chunk is a view of the input tensor. Last chunk will be bigger if the tensor size along the given dimension dim is not divisible by chunks.\n\n Args:\n input (oneflow.Tensor): The tensor to split.\n chunks (int): Number of chunks to return.\n dim (int): Dimension along which to split the tensor.\n\n Returns:\n List of Tensors.\n\n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n >>> import numpy as np\n \n >>> arr = np.random.randn(5, 3, 6, 9).astype(np.float32)\n >>> input = flow.tensor(arr)\n >>> output = []\n >>> chunks = 3\n >>> output = flow.chunk(input, chunks=chunks, dim=2)\n >>> out_shape = []\n >>> for i in range(0, chunks):\n ... out_shape.append(output[i].numpy().shape)\n >>> out_shape\n [(5, 3, 2, 9), (5, 3, 2, 9), (5, 3, 2, 9)]\n\n """\n )\n', (670, 1657), False, 'from oneflow.framework.docstr.utils import add_docstr\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow import numpy as np import sys import oneflow.typing as oft flow.config.gpu_device_num(4) func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) func_config.default_logical_view(flow.scope.consistent_view()) if __name__ == "__main__": @flow.global_function(function_config=func_config) def test_job(x: oft.Numpy.Placeholder((10000,), dtype=flow.float)): return flow.eager_nccl_all_reduce( x, parallel_conf=""" device_tag: "gpu", device_name: "0:0-3" """, ) for _ in range(10): x = np.random.rand(10000).astype(np.float32) y = test_job(x).get() print(x) print(y)
[ "oneflow.FunctionConfig", "oneflow.scope.consistent_view", "oneflow.eager_nccl_all_reduce", "oneflow.global_function", "oneflow.config.gpu_device_num", "oneflow.typing.Numpy.Placeholder" ]
[((673, 702), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(4)'], {}), '(4)\n', (699, 702), True, 'import oneflow as flow\n'), ((718, 739), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (737, 739), True, 'import oneflow as flow\n'), ((815, 843), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (841, 843), True, 'import oneflow as flow\n'), ((879, 928), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (899, 928), True, 'import oneflow as flow\n'), ((1016, 1110), 'oneflow.eager_nccl_all_reduce', 'flow.eager_nccl_all_reduce', (['x'], {'parallel_conf': '""" device_tag: "gpu", device_name: "0:0-3" """'}), '(x, parallel_conf=\n \' device_tag: "gpu", device_name: "0:0-3" \')\n', (1042, 1110), True, 'import oneflow as flow\n'), ((949, 998), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10000,)'], {'dtype': 'flow.float'}), '((10000,), dtype=flow.float)\n', (970, 998), True, 'import oneflow.typing as oft\n'), ((1170, 1191), 'numpy.random.rand', 'np.random.rand', (['(10000)'], {}), '(10000)\n', (1184, 1191), True, 'import numpy as np\n')]
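The job above hard-codes the device range in its `parallel_conf` string. A small helper (name and exact formatting are assumptions, mirroring the literal above) that builds the same string for other ranges:

def make_parallel_conf(device_tag="gpu", machine=0, first_dev=0, last_dev=3):
    # Reproduces the literal used above: a device tag plus a machine:first-last range.
    return ' device_tag: "%s", device_name: "%d:%d-%d" ' % (device_tag, machine, first_dev, last_dev)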
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow from oneflow.nn.common_types import _size_2_t from oneflow.nn.module import Module from oneflow.nn.modules.utils import _pair class Fold(Module): def __init__( self, output_size: _size_2_t, kernel_size: _size_2_t, dilation: _size_2_t = 1, padding: _size_2_t = 0, stride: _size_2_t = 1, ) -> None: r"""Combines an array of sliding local blocks into a large containing tensor, it also called `col2img`. Consider a batched :attr:`input` tensor containing sliding local blocks, e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})` is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})` spatial locations each containing a :math:`C`-channeled vector), and :math:`L` is the total number of blocks. (This is exactly the same specification as the output shape of :class:`~torch.nn.Unfold`.) This operation combines these local blocks into the large :attr:`output` tensor of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)` by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the arguments must satisfy .. math:: L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] % - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor, Args: output_size (_size_2_t): The spatial dimension of output tensor. kernel_size (_size_2_t): The size of kernel. dilation (_size_2_t, optional): The dilation rate. Defaults to 1. padding (_size_2_t, optional): The padding value. Defaults to 0. stride (_size_2_t, optional): The stride of sliding window. Defaults to 1. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x_tensor = flow.Tensor(np.random.randn(1, 9, 16)) >>> fold = flow.nn.Fold(output_size=(4, 4), kernel_size=3, padding=1) >>> out = fold(x_tensor) >>> out.shape oneflow.Size([1, 1, 4, 4]) """ super(Fold, self).__init__() self.output_size = output_size self.kernel_size = _pair(kernel_size) self.dilation = _pair(dilation) self.padding = _pair(padding) self.stride = _pair(stride) def forward(self, input): return flow._C.fold( input, "channels_first", self.output_size, self.kernel_size, self.dilation, self.padding, self.stride, ) def extra_repr(self) -> str: return ( "output_size={output_size}, kernel_size={kernel_size}, " "dilation={dilation}, padding={padding}, stride={stride}".format( **self.__dict__ ) ) class Unfold(Module): def __init__( self, kernel_size: _size_2_t, dilation: _size_2_t = 1, padding: _size_2_t = 0, stride: _size_2_t = 1, ) -> None: r"""This op extracts elements in a local window from input tensor, it also called `img2col`. Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`, where :math:`N` is the batch dimension, :math:`C` is the channel dimension, and :math:`*` represent arbitrary spatial dimensions. 
This operation flattens each sliding :attr:`kernel_size`-sized block within the spatial dimensions of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output` tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where :math:`C \times \prod(\text{kernel\_size})` is the total number of values within each block (a block has :math:`\prod(\text{kernel\_size})` spatial locations each containing a :math:`C`-channeled vector), and :math:`L` is the total number of such blocks: .. math:: L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] % - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor, where :math:`\text{spatial\_size}` is formed by the spatial dimensions of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial dimensions. Therefore, indexing :attr:`output` at the last dimension (column dimension) gives all values within a certain block. Args: kernel_size (_size_2_t): The size of kernel. dilation (_size_2_t, optional): The dilation rate. Defaults to 1. padding (_size_2_t, optional): The padding value. Defaults to 0. stride (_size_2_t, optional): The stride of sliding window. Defaults to 1. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x_tensor = flow.Tensor(np.random.randn(1, 1, 4, 4)) >>> unfold = flow.nn.Unfold(kernel_size=3, padding=1) >>> out = unfold(x_tensor) >>> out.shape oneflow.Size([1, 9, 16]) """ super(Unfold, self).__init__() self.kernel_size = _pair(kernel_size) self.dilation = _pair(dilation) self.padding = _pair(padding) self.stride = _pair(stride) def forward(self, input): return flow._C.unfold( input, "channels_first", self.kernel_size, self.dilation, self.padding, self.stride, ) def extra_repr(self) -> str: return ( "kernel_size={kernel_size}, dilation={dilation}, padding={padding}," " stride={stride}".format(**self.__dict__) ) if __name__ == "__main__": import doctest doctest.testmod(raise_on_error=True)
[ "oneflow._C.unfold", "oneflow.nn.modules.utils._pair", "oneflow._C.fold" ]
[((6737, 6773), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (6752, 6773), False, 'import doctest\n'), ((3087, 3105), 'oneflow.nn.modules.utils._pair', '_pair', (['kernel_size'], {}), '(kernel_size)\n', (3092, 3105), False, 'from oneflow.nn.modules.utils import _pair\n'), ((3130, 3145), 'oneflow.nn.modules.utils._pair', '_pair', (['dilation'], {}), '(dilation)\n', (3135, 3145), False, 'from oneflow.nn.modules.utils import _pair\n'), ((3169, 3183), 'oneflow.nn.modules.utils._pair', '_pair', (['padding'], {}), '(padding)\n', (3174, 3183), False, 'from oneflow.nn.modules.utils import _pair\n'), ((3206, 3219), 'oneflow.nn.modules.utils._pair', '_pair', (['stride'], {}), '(stride)\n', (3211, 3219), False, 'from oneflow.nn.modules.utils import _pair\n'), ((3266, 3385), 'oneflow._C.fold', 'flow._C.fold', (['input', '"""channels_first"""', 'self.output_size', 'self.kernel_size', 'self.dilation', 'self.padding', 'self.stride'], {}), "(input, 'channels_first', self.output_size, self.kernel_size,\n self.dilation, self.padding, self.stride)\n", (3278, 3385), True, 'import oneflow as flow\n'), ((6125, 6143), 'oneflow.nn.modules.utils._pair', '_pair', (['kernel_size'], {}), '(kernel_size)\n', (6130, 6143), False, 'from oneflow.nn.modules.utils import _pair\n'), ((6168, 6183), 'oneflow.nn.modules.utils._pair', '_pair', (['dilation'], {}), '(dilation)\n', (6173, 6183), False, 'from oneflow.nn.modules.utils import _pair\n'), ((6207, 6221), 'oneflow.nn.modules.utils._pair', '_pair', (['padding'], {}), '(padding)\n', (6212, 6221), False, 'from oneflow.nn.modules.utils import _pair\n'), ((6244, 6257), 'oneflow.nn.modules.utils._pair', '_pair', (['stride'], {}), '(stride)\n', (6249, 6257), False, 'from oneflow.nn.modules.utils import _pair\n'), ((6304, 6407), 'oneflow._C.unfold', 'flow._C.unfold', (['input', '"""channels_first"""', 'self.kernel_size', 'self.dilation', 'self.padding', 'self.stride'], {}), "(input, 'channels_first', self.kernel_size, self.dilation,\n self.padding, self.stride)\n", (6318, 6407), True, 'import oneflow as flow\n')]
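Because `Fold` sums overlapping values, composing the two modules above is not the identity map. A hedged round-trip sketch: dividing by the fold of an all-ones tensor (the per-pixel overlap count) should recover the input exactly.

import oneflow as flow

x = flow.ones(1, 1, 4, 4)
unfold = flow.nn.Unfold(kernel_size=3, padding=1)
fold = flow.nn.Fold(output_size=(4, 4), kernel_size=3, padding=1)
cols = unfold(x)  # shape (1, 9, 16)
divisor = fold(unfold(flow.ones(1, 1, 4, 4)))  # overlap count per pixel
recovered = fold(cols) / divisor  # elementwise equal to x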
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow import oneflow.typing as tp from tsn_model import restsn import argparse import time import mmcv import os.path as osp import os import tempfile from collections import Counter import warnings warnings.filterwarnings("ignore", category=UserWarning) from video_dataset import * def parse_args(): parser = argparse.ArgumentParser(description='Test an action recognizer') parser.add_argument('--config', default = 'test_configs/TSN/tsn_kinetics400_2d_rgb_r50_seg3_f1s1.py', help='test config file path') parser.add_argument('--launcher', choices=['none', 'pytorch', 'mpi', 'slurm'], default='pytorch', help='job launcher') parser.add_argument('--out', help='output result file', default='default.pkl') parser.add_argument('--use_softmax', action='store_true', help='whether to use softmax score') # for oneflow parser.add_argument("--gpu_num_per_node", type=int, default=1, required=False) parser.add_argument("--use_tensorrt", dest="use_tensorrt", action="store_true", default=False, required=False, help="inference with tensorrt") parser.add_argument("--model_load_dir", type=str, default='./output/save_model', required=False, help="model load directory") parser.add_argument("--log_dir", type=str, default="./output", required=False, help="log info save directory") parser.add_argument("-image_height", "--image_height", type=int, default=224, required=False) parser.add_argument("-image_width", "--image_width", type=int, default=224, required=False) args = parser.parse_args() return args args = parse_args() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) flow.config.gpu_device_num(args.gpu_num_per_node) if args.use_tensorrt: func_config.use_tensorrt(True) @flow.global_function(func_config) def tsn_eval_job(image:tp.Numpy.Placeholder((250,3,224,224))): features = restsn(image, trainable=False) return features class TSNInference(object): def __init__(self): check_point = flow.train.CheckPoint() check_point.load(args.model_load_dir) def inference(self, imgs): array = np.ascontiguousarray(imgs) print(array.shape) feature = tsn_eval_job(array).get() #print('feature',feature.numpy()) result = np.argmax(feature.numpy().flatten()) return result def multi_test(): global args predicts = [] labels = [] count = 0 # VideoDataset set config ann_file = "data/kinetics400/kinetics400_val_list_videos_small.txt" img_prefix = "data/kinetics400/videos_val" img_norm_cfg = {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True} anno_open = open(ann_file, 'r') anno_len = len(anno_open.readlines()) anno_open.close() oneflow_dataset = VideoDataset(ann_file, img_prefix, img_norm_cfg) flow.env.grpc_use_no_signal() flow.env.log_dir(args.log_dir) obj = TSNInference() wrong_count = 0 for i in range(anno_len): img_group, label = oneflow_dataset[i] #print(img_group) flow_result = obj.inference(img_group) if label!=flow_result: wrong_count = wrong_count +1 print(label,flow_result) if i % 100 
== 0: print('data_batch {}'.format(i)) count = count + 1 precision = (anno_len - wrong_count)/anno_len return precision def main(): global args args = parse_args() if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') final_precision = multi_test() print("precision is: {}".format(final_precision)) if __name__ == '__main__': main()
[ "oneflow.env.grpc_use_no_signal", "oneflow.FunctionConfig", "oneflow.train.CheckPoint", "oneflow.global_function", "oneflow.config.gpu_device_num", "oneflow.typing.Numpy.Placeholder", "oneflow.env.log_dir" ]
[((804, 859), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (827, 859), False, 'import warnings\n'), ((2244, 2265), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2263, 2265), True, 'import oneflow as flow\n'), ((2308, 2357), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (2334, 2357), True, 'import oneflow as flow\n'), ((2421, 2454), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (2441, 2454), True, 'import oneflow as flow\n'), ((920, 984), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test an action recognizer"""'}), "(description='Test an action recognizer')\n", (943, 984), False, 'import argparse\n'), ((2533, 2563), 'tsn_model.restsn', 'restsn', (['image'], {'trainable': '(False)'}), '(image, trainable=False)\n', (2539, 2563), False, 'from tsn_model import restsn\n'), ((3503, 3532), 'oneflow.env.grpc_use_no_signal', 'flow.env.grpc_use_no_signal', ([], {}), '()\n', (3530, 3532), True, 'import oneflow as flow\n'), ((3537, 3567), 'oneflow.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (3553, 3567), True, 'import oneflow as flow\n'), ((2478, 2518), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(250, 3, 224, 224)'], {}), '((250, 3, 224, 224))\n', (2498, 2518), True, 'import oneflow.typing as tp\n'), ((2659, 2682), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (2680, 2682), True, 'import oneflow as flow\n')]
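The evaluation loop above mixes counting, printing, and progress logging. The same top-1 precision can be factored into a small standalone helper (the function name is hypothetical):

def top1_precision(dataset, infer_fn, num_samples):
    # Count samples whose predicted class differs from the ground-truth label.
    wrong = 0
    for i in range(num_samples):
        img_group, label = dataset[i]
        if infer_fn(img_group) != label:
            wrong += 1
    return (num_samples - wrong) / num_samples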
import matplotlib.pyplot as plt import matplotlib.ticker as ticker import math import time import oneflow as flow # refer to: https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html teacher_forcing_ratio = 0.5 SOS_token = 0 EOS_token = 1 device = "cuda" MAX_LENGTH = 10 eng_prefixes = ( "i am ", "i m ", "he is", "he s ", "she is", "she s ", "you are", "you re ", "we are", "we re ", "they are", "they re ", ) def showAttention(input_sentence, output_words, attentions): # Set up figure with colorbar fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(attentions.numpy(), cmap="bone") fig.colorbar(cax) # Set up axes ax.set_xticklabels([""] + input_sentence.split(" ") + ["<EOS>"], rotation=90) ax.set_yticklabels([""] + output_words) # Show label at every tick ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) plt.show() def showPlot(points): plt.figure() fig, ax = plt.subplots() # this locator puts ticks at regular intervals loc = ticker.MultipleLocator(base=0.2) ax.yaxis.set_major_locator(loc) plt.plot(points) plt.savefig("./loss_oneflow.jpg") plt.show() def asMinutes(s): m = math.floor(s / 60) s -= m * 60 return "%dm %ds" % (m, s) def timeSince(since, percent): now = time.time() s = now - since es = s / (percent) rs = es - s return "%s (- %s)" % (asMinutes(s), asMinutes(rs)) def indexesFromSentence(lang, sentence): return [lang.word2index[word] for word in sentence.split(" ")] def tensorFromSentence(lang, sentence): indexes = indexesFromSentence(lang, sentence) indexes.append(EOS_token) return flow.tensor(indexes, dtype=flow.long, device=device).reshape([-1, 1]) def tensorsFromPair(pair, input_lang, output_lang): input_tensor = tensorFromSentence(input_lang, pair[0]) target_tensor = tensorFromSentence(output_lang, pair[1]) return (input_tensor, target_tensor)
[ "oneflow.tensor" ]
[((621, 633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (631, 633), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1059, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1094, 1106), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1104, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1136), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1134, 1136), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1232), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', ([], {'base': '(0.2)'}), '(base=0.2)\n', (1222, 1232), True, 'import matplotlib.ticker as ticker\n'), ((1275, 1291), 'matplotlib.pyplot.plot', 'plt.plot', (['points'], {}), '(points)\n', (1283, 1291), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1330), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./loss_oneflow.jpg"""'], {}), "('./loss_oneflow.jpg')\n", (1308, 1330), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1344, 1346), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1397), 'math.floor', 'math.floor', (['(s / 60)'], {}), '(s / 60)\n', (1389, 1397), False, 'import math\n'), ((1493, 1504), 'time.time', 'time.time', ([], {}), '()\n', (1502, 1504), False, 'import time\n'), ((958, 983), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (980, 983), True, 'import matplotlib.ticker as ticker\n'), ((1017, 1042), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (1039, 1042), True, 'import matplotlib.ticker as ticker\n'), ((1876, 1928), 'oneflow.tensor', 'flow.tensor', (['indexes'], {'dtype': 'flow.long', 'device': 'device'}), '(indexes, dtype=flow.long, device=device)\n', (1887, 1928), True, 'import oneflow as flow\n')]
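A short usage sketch for `tensorFromSentence`; the dict-based stand-in for the tutorial's `Lang` class is an assumption made for illustration, and a CUDA device is required because `device` is set to "cuda" above:

class _StubLang:
    word2index = {"hello": 2, "world": 3}

t = tensorFromSentence(_StubLang(), "hello world")
print(t.shape)  # (3, 1): two token ids plus the appended EOS_token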
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import oneflow as flow from flowvision import transforms from flowvision.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from PIL import ImageFilter, ImageOps from libai.config import LazyCall class GaussianBlur(object): """Gaussian blur augmentation from SimCLR: https://arxiv.org/abs/2002.05709""" def __init__(self, sigma=[0.1, 2.0]): self.sigma = sigma def __call__(self, x): sigma = random.uniform(self.sigma[0], self.sigma[1]) x = x.filter(ImageFilter.GaussianBlur(radius=sigma)) return x class Solarize(object): """Solarize augmentation from BYOL: https://arxiv.org/abs/2006.07733""" def __call__(self, x): return ImageOps.solarize(x) # follow BYOL's augmentation recipe: https://arxiv.org/abs/2006.07733 augmentation1 = [ LazyCall(transforms.RandomResizedCrop)(size=224, scale=(0.2, 1.0)), LazyCall(transforms.RandomApply)( transforms=[ LazyCall(transforms.ColorJitter)( brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1 ) # not strengthened ], p=0.8, ), # TODO: Add RandomGrayscale # LazyCall(transforms.RandomGrayscale)(p=0.2), LazyCall(transforms.RandomApply)(transforms=[LazyCall(GaussianBlur)(sigma=[0.1, 2.0])], p=1.0), LazyCall(transforms.RandomHorizontalFlip)(), LazyCall(transforms.ToTensor)(), LazyCall(transforms.Normalize)(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), ] augmentation2 = [ LazyCall(transforms.RandomResizedCrop)(size=224, scale=(0.2, 1.0)), LazyCall(transforms.RandomApply)( transforms=[ LazyCall(transforms.ColorJitter)( brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1 ) # not strengthened ], p=0.8, ), # TODO: Add RandomGrayscale # LazyCall(transforms.RandomGrayscale)(p=0.2), LazyCall(transforms.RandomApply)(transforms=[LazyCall(GaussianBlur)(sigma=[0.1, 2.0])], p=1.0), LazyCall(transforms.RandomApply)(transforms=[LazyCall(Solarize)()], p=0.2), LazyCall(transforms.RandomHorizontalFlip)(), LazyCall(transforms.ToTensor)(), LazyCall(transforms.Normalize)(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), ] class TwoCropsTransform: """Take two random crops of one image""" def __init__(self, base_transform1, base_transform2): self.base_transform1 = base_transform1 self.base_transform2 = base_transform2 def __call__(self, x): im1 = self.base_transform1(x) im2 = self.base_transform2(x) return flow.cat((im1, im2), dim=0)
[ "oneflow.cat" ]
[((1066, 1110), 'random.uniform', 'random.uniform', (['self.sigma[0]', 'self.sigma[1]'], {}), '(self.sigma[0], self.sigma[1])\n', (1080, 1110), False, 'import random\n'), ((1334, 1354), 'PIL.ImageOps.solarize', 'ImageOps.solarize', (['x'], {}), '(x)\n', (1351, 1354), False, 'from PIL import ImageFilter, ImageOps\n'), ((1449, 1487), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomResizedCrop'], {}), '(transforms.RandomResizedCrop)\n', (1457, 1487), False, 'from libai.config import LazyCall\n'), ((1521, 1553), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomApply'], {}), '(transforms.RandomApply)\n', (1529, 1553), False, 'from libai.config import LazyCall\n'), ((1846, 1878), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomApply'], {}), '(transforms.RandomApply)\n', (1854, 1878), False, 'from libai.config import LazyCall\n'), ((1946, 1987), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomHorizontalFlip'], {}), '(transforms.RandomHorizontalFlip)\n', (1954, 1987), False, 'from libai.config import LazyCall\n'), ((1995, 2024), 'libai.config.LazyCall', 'LazyCall', (['transforms.ToTensor'], {}), '(transforms.ToTensor)\n', (2003, 2024), False, 'from libai.config import LazyCall\n'), ((2032, 2062), 'libai.config.LazyCall', 'LazyCall', (['transforms.Normalize'], {}), '(transforms.Normalize)\n', (2040, 2062), False, 'from libai.config import LazyCall\n'), ((2143, 2181), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomResizedCrop'], {}), '(transforms.RandomResizedCrop)\n', (2151, 2181), False, 'from libai.config import LazyCall\n'), ((2215, 2247), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomApply'], {}), '(transforms.RandomApply)\n', (2223, 2247), False, 'from libai.config import LazyCall\n'), ((2540, 2572), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomApply'], {}), '(transforms.RandomApply)\n', (2548, 2572), False, 'from libai.config import LazyCall\n'), ((2640, 2672), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomApply'], {}), '(transforms.RandomApply)\n', (2648, 2672), False, 'from libai.config import LazyCall\n'), ((2720, 2761), 'libai.config.LazyCall', 'LazyCall', (['transforms.RandomHorizontalFlip'], {}), '(transforms.RandomHorizontalFlip)\n', (2728, 2761), False, 'from libai.config import LazyCall\n'), ((2769, 2798), 'libai.config.LazyCall', 'LazyCall', (['transforms.ToTensor'], {}), '(transforms.ToTensor)\n', (2777, 2798), False, 'from libai.config import LazyCall\n'), ((2806, 2836), 'libai.config.LazyCall', 'LazyCall', (['transforms.Normalize'], {}), '(transforms.Normalize)\n', (2814, 2836), False, 'from libai.config import LazyCall\n'), ((3238, 3265), 'oneflow.cat', 'flow.cat', (['(im1, im2)'], {'dim': '(0)'}), '((im1, im2), dim=0)\n', (3246, 3265), True, 'import oneflow as flow\n'), ((1132, 1170), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', ([], {'radius': 'sigma'}), '(radius=sigma)\n', (1156, 1170), False, 'from PIL import ImageFilter, ImageOps\n'), ((1588, 1620), 'libai.config.LazyCall', 'LazyCall', (['transforms.ColorJitter'], {}), '(transforms.ColorJitter)\n', (1596, 1620), False, 'from libai.config import LazyCall\n'), ((1891, 1913), 'libai.config.LazyCall', 'LazyCall', (['GaussianBlur'], {}), '(GaussianBlur)\n', (1899, 1913), False, 'from libai.config import LazyCall\n'), ((2282, 2314), 'libai.config.LazyCall', 'LazyCall', (['transforms.ColorJitter'], {}), '(transforms.ColorJitter)\n', (2290, 2314), False, 'from libai.config import LazyCall\n'), ((2585, 2607), 'libai.config.LazyCall', 
'LazyCall', (['GaussianBlur'], {}), '(GaussianBlur)\n', (2593, 2607), False, 'from libai.config import LazyCall\n'), ((2685, 2703), 'libai.config.LazyCall', 'LazyCall', (['Solarize'], {}), '(Solarize)\n', (2693, 2703), False, 'from libai.config import LazyCall\n')]
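A hedged sketch of how the two pipelines above might be materialized and paired; it assumes LiBai's `instantiate` resolves `LazyCall` objects, as in its LazyConfig system:

from flowvision import transforms
from libai.config import instantiate

base_transform1 = transforms.Compose([instantiate(aug) for aug in augmentation1])
base_transform2 = transforms.Compose([instantiate(aug) for aug in augmentation2])
two_crops = TwoCropsTransform(base_transform1, base_transform2)  # yields both crops concatenated on dim 0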
import oneflow as flow
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt

np.set_printoptions(threshold=np.inf)

def watch_handler(y: flow.typing.Numpy):
    sub_img = y[0,0,:,:]
    sub_img = 1.0 / (1+np.exp(-1 * sub_img))
    sub_img = np.round(sub_img*255)
    # cv2.imwrite('sub_image.jpg', sub_img)
    print("out", np.sum(y))

def watch_step(y: flow.typing.Numpy):
    print("step", np.sum(y))

def watch_weight(y: flow.typing.Numpy):
    print("weight", np.sum(y))

class DLABuilder(object):
    def __init__(self, levels, channels, weight_regularizer, trainable=True, training=True, channel_last=False):
        self.data_format = "NHWC" if channel_last else "NCHW"
        self.weight_initializer = flow.variance_scaling_initializer(2, 'fan_in', 'random_normal', data_format=self.data_format)
        self.weight_regularizer = weight_regularizer
        self.trainable = trainable
        self.training = training
        self.levels = levels
        self.channels = channels

    def _conv2d(
            self,
            name,
            input,
            filters,
            kernel_size,
            strides=1,
            padding="SAME",
            dilations=1,
            bias=0
    ):
        # There are different shapes of weight matrix between 'NCHW' and 'NHWC' mode
        if self.data_format == "NHWC":
            shape = (filters, kernel_size, kernel_size, input.shape[3])
        else:
            shape = (filters, input.shape[1], kernel_size, kernel_size)
        weight = flow.get_variable(
            name + "-weight",
            shape=shape,
            dtype=input.dtype,
            initializer=self.weight_initializer,
            regularizer=self.weight_regularizer,
            model_name="weight",
            trainable=self.trainable,
        )
        output = flow.nn.conv2d(input, weight, strides, padding, self.data_format, dilations, name=name + "_conv")
        # flow.watch(weight, watch_step)
        if bias != 0:
            bias_weight = flow.get_variable(
                name + "_bias",
                shape=(filters,),
                dtype=input.dtype,
                initializer=flow.constant_initializer(bias),
                regularizer=self.weight_regularizer,
            )
            output = flow.nn.bias_add(output, bias_weight, data_format=self.data_format)
        return output

    def _batch_norm(self, inputs, name=None):
        axis = 1
        if self.data_format == "NHWC":
            axis = 3
        return flow.layers.batch_normalization(inputs=inputs, axis=axis, name=name+"_bn")

    def _conv2d_transpose_layer(self, name,  # name of layer
                                input,  # input of layer
                                kernel_size,  # kernel size of filters
                                in_channels,
                                out_channels,
                                strides=1,  # strides size
                                padding="SAME",  # padding is SAME or VALID
                                data_format="NCHW",  # N:batch size C: Number of channels H:height W:width
                                dilations=1,
                                trainable=False,  # trainable is True or False
                                use_bias=False,  # use_bias is True or False
                                bias_initializer=flow.zeros_initializer()  # flow.random_uniform_initializer(),
                                ):
        dilations = 1
        # weights in convolution layers
        weight = flow.get_variable(
            name + "-weight",
            shape=(in_channels, out_channels, kernel_size, kernel_size),
            dtype=flow.float,
            initializer=flow.variance_scaling_initializer(distribution="random_normal", data_format="NCHW"),
            regularizer=flow.regularizers.l2(0.0005),
            trainable=False,
        )
        out_shape = [input.shape[0], out_channels, input.shape[2] * strides, input.shape[3] * strides]
        output = flow.nn.conv2d_transpose(input, weight, strides=strides, output_shape=out_shape,
                                          dilations=dilations, padding=padding, data_format=data_format)  # deconvolution layer
        # bias in convolution layers
        if use_bias:
            bias = flow.get_variable(
                name + "-bias",
                shape=(out_channels,),
                dtype=input.dtype,
                initializer=bias_initializer,  # initialise bias
                regularizer=flow.regularizers.l2(0.0005)  # bias regularizer
            )
            # add bias if use_bias is true
            output = flow.nn.bias_add(output, bias, data_format)
        return output

    def base_layer(self, input):
        conv = self._conv2d("base_layer", input, self.channels[0], 7, 1)
conv_bn = self._batch_norm(conv, "base_layer") conv_bn_relu = flow.nn.relu(conv_bn) return conv_bn_relu def _make_conv_level(self, level_name, x, planes, convs, stride=1, dilation=1): for i in range(convs): layer_name = "%s_%d" % (level_name, i) x = self._conv2d(layer_name, x, planes, 3, strides=stride if i == 0 else 1, dilations=dilation) x = self._batch_norm(x, layer_name) x = flow.nn.relu(x) return x def _block(self, name, x, inplanes, planes, stride=1, dilation=1, residual=None): if residual is None: residual = x out = self._conv2d(name+"_1", x, planes, kernel_size=3, strides=stride, dilations=dilation) out = self._batch_norm(out, name+"_1") out = flow.nn.relu(out) out = self._conv2d(name+"_2", out, planes, kernel_size=3, strides=1, dilations=dilation) out = self._batch_norm(out, name+"_2") out = flow.math.add(out, residual, name=name+"_block_neck") out = flow.nn.relu(out) return out def _Root(self, name, tree2, tree1, children, in_channels, out_channels, kernel_size, residual): if children.__len__() == 0: x = flow.concat([tree2, tree1], 1) else: x = flow.concat([tree2, tree1], 1) for i in range(children.__len__()): x = flow.concat([x, children[i]], 1) x = self._conv2d(name, x, out_channels, 1, 1) x = self._batch_norm(x, name=name) if residual: x = flow.math.add(x, children[0], name=name+"_root_neck") x = flow.nn.relu(x) return x def _Tree(self, name, x, levels, in_channels, out_channels, stride=1, level_root=False, root_dim=0, root_kernel_size=1, dilation=1, root_residual=False, residual=None, children=None): children = [] if children is None else children if stride > 1: bottom = flow.nn.max_pool2d(x, stride, strides=stride, padding="SAME") else: bottom = x if in_channels != out_channels: residual = self._conv2d(name+"_res", bottom, out_channels, kernel_size=1, strides=1) residual = self._batch_norm(residual, name=name+"_res") else: residual = bottom if root_dim == 0: root_dim = 2 * out_channels if level_root: children.append(bottom) root_dim += in_channels if levels == 1: tree1 = self._block(name+"_tree1", x, in_channels, out_channels, stride, dilation=dilation, residual=residual) tree2 = self._block(name+"_tree2", tree1, out_channels, out_channels, 1, dilation=dilation, residual=residual) out = self._Root(name+"_root", tree2, tree1, children, root_dim, out_channels, root_kernel_size, root_residual) else: tree1 = self._Tree(name+"_tree1", x, levels - 1, in_channels, out_channels, stride, root_dim=0, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual) children.append(tree1) out = self._Tree(name+"_tree2", tree1, levels - 1, out_channels, out_channels, 1, root_dim=root_dim + out_channels, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual, children=children) return out def dla(self, x, residual_root=False): y = [] x = self.base_layer(x) x = self._make_conv_level("level0", x, self.channels[0], self.levels[0]) y.append(x) x = self._make_conv_level("level1", x, self.channels[1], self.levels[1], stride=2) y.append(x) x = self._Tree("level2", x, self.levels[2], self.channels[1], self.channels[2], stride=2, level_root=False, root_residual=residual_root) y.append(x) x = self._Tree("level3", x, self.levels[3], self.channels[2], self.channels[3], stride=2, level_root=True, root_residual=residual_root) y.append(x) x = self._Tree("level4", x, self.levels[4], self.channels[3], self.channels[4], stride=2, level_root=True, root_residual=residual_root) y.append(x) x = self._Tree("level5", x, self.levels[5], self.channels[4], 
self.channels[5], stride=2, level_root=True, root_residual=residual_root) y.append(x) return y def backbone(self, x, residual_root=False): return self.dla(x) def proj(self, name, x, chi, out_dim): x = self._conv2d(name+"_proj", x, out_dim, 1, strides=1) x = self._batch_norm(x, name+"_proj") return flow.nn.relu(x) def node(self, name, x, chi, out_dim): x = self._conv2d(name+"_node", x, out_dim, kernel_size=3, strides=1) x = self._batch_norm(x, name+"_node") return flow.nn.relu(x) def IDA_UP(self, name, x, startp, endp, out_dim, channels, up_factors): for i in range(startp + 1, endp): name = "%s_%d" % (name, i) project = self.proj(name, x[i], channels[i - startp], out_dim) x[i] = self._conv2d_transpose_layer(name + "_transpose", project, int(up_factors[i - startp]) * 2, in_channels=out_dim, out_channels=out_dim, strides=int(up_factors[i - startp])) name_node_add = "%s%d%d" % (name+"_add", i, i-1) node_add = flow.math.add(x[i], x[i - 1], name=name_node_add) x[i] = self.node(name, node_add, out_dim, out_dim) def DLA_UP(self, x, first_level, channels, scales, in_channels=None): out = [x[-1]] scales = np.array(scales, dtype=int) if in_channels is None: in_channels = channels for i in range(len(x) - first_level - 1): j = -i - 2 name = "%s_%d" % ("dla_up", i) self.IDA_UP(name, x, len(x) - i - 2, len(x), channels[j], in_channels[j:], scales[j:] // scales[j]) scales[j + 1:] = scales[j] in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] out.insert(0, x[-1]) return out def _head(self, name, x, classes, head_conv): x = self._conv2d(name+"_head_1", x, head_conv, kernel_size=3, padding="SAME") x = flow.nn.relu(x) if name == "hm": x = self._conv2d(name + "_head_2", x, classes, kernel_size=1, strides=1, bias=-2.19) else: x = self._conv2d(name+"_head_2", x, classes, kernel_size=1, strides=1) return x def DLA34(images, args, trainable=True, training=True): weight_regularizer = flow.regularizers.l2(args.wd) if args.wd > 0.0 and args.wd < 1.0 else None levels = [1, 1, 1, 2, 2, 1] channels = [16, 32, 64, 128, 256, 512] down_ratio = 4 first_level = int(np.log2(down_ratio)) last_level = 5 scales = [2 ** i for i in range(len(channels[first_level:]))] out_channels = channels[first_level] builder = DLABuilder(levels, channels, weight_regularizer, trainable, training) with flow.scope.namespace("DLA34"): # flow.watch(images, watch_handler) backbone = builder.backbone(images) x = backbone[-1] x = flow.nn.avg_pool2d(x, 7, strides=1, padding="VALID") x = flow.layers.dense(inputs=flow.reshape(x, (x.shape[0], -1)), units=1000, use_bias=True, kernel_initializer=flow.random_normal_initializer(stddev=0.01), bias_initializer=flow.zeros_initializer(), trainable=True) return x def DLASeg(image, args, trainable=True, training=True): weight_regularizer = flow.regularizers.l2(0.00005) levels = [1, 1, 1, 2, 2, 1] channels = [16, 32, 64, 128, 256, 512] down_ratio = 4 first_level = int(np.log2(down_ratio)) last_level = 5 scales = [2 ** i for i in range(len(channels[first_level:]))] out_channels = channels[first_level] builder = DLABuilder(levels, channels, weight_regularizer, trainable, training) with flow.scope.namespace("DLA34"): backbone = builder.backbone(image) dla_up = builder.DLA_UP(backbone, first_level, channels[first_level:], scales) y = [] for i in range(last_level - first_level): y.append(dla_up[i]) builder.IDA_UP("IDA_UP", y, 0, len(y), out_channels, channels[first_level:last_level], [2 **i for i in range(last_level - first_level)]) z = {} head_conv = 256 z["hm"] = builder._head("hm", y[-1], 1, head_conv) 
z["wh"] = builder._head("wh", y[-1], 2, head_conv) z["id"] = builder._head("id", y[-1], 512, head_conv) z["reg"]= builder._head("reg", y[-1], 2, head_conv) return z def CenterNet(image, args, trainable=True, training=True): weight_regularizer = flow.regularizers.l2(0.00005) levels = [1, 1, 1, 2, 2, 1] channels = [16, 32, 64, 128, 256, 512] down_ratio = 4 first_level = int(np.log2(down_ratio)) last_level = 5 scales = [2 ** i for i in range(len(channels[first_level:]))] out_channels = channels[first_level] builder = DLABuilder(levels, channels, weight_regularizer, trainable, training) with flow.scope.namespace("DLA34"): backbone = builder.backbone(image) dla_up = builder.DLA_UP(backbone, first_level, channels[first_level:], scales) y = [] for i in range(last_level - first_level): y.append(dla_up[i]) builder.IDA_UP("IDA_UP", y, 0, len(y), out_channels, channels[first_level:last_level], [2 **i for i in range(last_level - first_level)]) z = {} head_conv = 256 z["hm"] = builder._head("hm", y[-1], 80, head_conv) z["wh"] = builder._head("wh", y[-1], 2, head_conv) z["reg"]= builder._head("reg", y[-1], 2, head_conv) return z
[ "oneflow.nn.relu", "oneflow.get_variable", "oneflow.zeros_initializer", "oneflow.variance_scaling_initializer", "oneflow.nn.bias_add", "oneflow.reshape", "oneflow.concat", "oneflow.nn.conv2d_transpose", "oneflow.math.add", "oneflow.nn.avg_pool2d", "oneflow.constant_initializer", "oneflow.layers.batch_normalization", "oneflow.nn.conv2d", "oneflow.scope.namespace", "oneflow.regularizers.l2", "oneflow.nn.max_pool2d", "oneflow.random_normal_initializer" ]
[((108, 145), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (127, 145), True, 'import numpy as np\n'), ((271, 294), 'numpy.round', 'np.round', (['(sub_img * 255)'], {}), '(sub_img * 255)\n', (279, 294), True, 'import numpy as np\n'), ((12821, 12848), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['(5e-05)'], {}), '(5e-05)\n', (12841, 12848), True, 'import oneflow as flow\n'), ((14017, 14044), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['(5e-05)'], {}), '(5e-05)\n', (14037, 14044), True, 'import oneflow as flow\n'), ((354, 363), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (360, 363), True, 'import numpy as np\n'), ((422, 431), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (428, 431), True, 'import numpy as np\n'), ((494, 503), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (500, 503), True, 'import numpy as np\n'), ((741, 839), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', '"""random_normal"""'], {'data_format': 'self.data_format'}), "(2, 'fan_in', 'random_normal', data_format\n =self.data_format)\n", (774, 839), True, 'import oneflow as flow\n'), ((1606, 1803), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'shape', 'dtype': 'input.dtype', 'initializer': 'self.weight_initializer', 'regularizer': 'self.weight_regularizer', 'model_name': '"""weight"""', 'trainable': 'self.trainable'}), "(name + '-weight', shape=shape, dtype=input.dtype,\n initializer=self.weight_initializer, regularizer=self.\n weight_regularizer, model_name='weight', trainable=self.trainable)\n", (1623, 1803), True, 'import oneflow as flow\n'), ((1908, 2009), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'self.data_format', 'dilations'], {'name': "(name + '_conv')"}), "(input, weight, strides, padding, self.data_format, dilations,\n name=name + '_conv')\n", (1922, 2009), True, 'import oneflow as flow\n'), ((2594, 2670), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': 'axis', 'name': "(name + '_bn')"}), "(inputs=inputs, axis=axis, name=name + '_bn')\n", (2625, 2670), True, 'import oneflow as flow\n'), ((3474, 3498), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (3496, 3498), True, 'import oneflow as flow\n'), ((4127, 4275), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['input', 'weight'], {'strides': 'strides', 'output_shape': 'out_shape', 'dilations': 'dilations', 'padding': 'padding', 'data_format': 'data_format'}), '(input, weight, strides=strides, output_shape=\n out_shape, dilations=dilations, padding=padding, data_format=data_format)\n', (4151, 4275), True, 'import oneflow as flow\n'), ((5053, 5074), 'oneflow.nn.relu', 'flow.nn.relu', (['conv_bn'], {}), '(conv_bn)\n', (5065, 5074), True, 'import oneflow as flow\n'), ((5778, 5795), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (5790, 5795), True, 'import oneflow as flow\n'), ((5956, 6011), 'oneflow.math.add', 'flow.math.add', (['out', 'residual'], {'name': "(name + '_block_neck')"}), "(out, residual, name=name + '_block_neck')\n", (5969, 6011), True, 'import oneflow as flow\n'), ((6024, 6041), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (6036, 6041), True, 'import oneflow as flow\n'), ((6610, 6625), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {}), '(x)\n', (6622, 6625), True, 'import oneflow as flow\n'), ((9753, 9768), 'oneflow.nn.relu', 
'flow.nn.relu', (['x'], {}), '(x)\n', (9765, 9768), True, 'import oneflow as flow\n'), ((9951, 9966), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {}), '(x)\n', (9963, 9966), True, 'import oneflow as flow\n'), ((10798, 10825), 'numpy.array', 'np.array', (['scales'], {'dtype': 'int'}), '(scales, dtype=int)\n', (10806, 10825), True, 'import numpy as np\n'), ((11435, 11450), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {}), '(x)\n', (11447, 11450), True, 'import oneflow as flow\n'), ((11770, 11799), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['args.wd'], {}), '(args.wd)\n', (11790, 11799), True, 'import oneflow as flow\n'), ((11962, 11981), 'numpy.log2', 'np.log2', (['down_ratio'], {}), '(down_ratio)\n', (11969, 11981), True, 'import numpy as np\n'), ((12203, 12232), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""DLA34"""'], {}), "('DLA34')\n", (12223, 12232), True, 'import oneflow as flow\n'), ((12360, 12412), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['x', '(7)'], {'strides': '(1)', 'padding': '"""VALID"""'}), "(x, 7, strides=1, padding='VALID')\n", (12378, 12412), True, 'import oneflow as flow\n'), ((12968, 12987), 'numpy.log2', 'np.log2', (['down_ratio'], {}), '(down_ratio)\n', (12975, 12987), True, 'import numpy as np\n'), ((13209, 13238), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""DLA34"""'], {}), "('DLA34')\n", (13229, 13238), True, 'import oneflow as flow\n'), ((14164, 14183), 'numpy.log2', 'np.log2', (['down_ratio'], {}), '(down_ratio)\n', (14171, 14183), True, 'import numpy as np\n'), ((14405, 14434), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""DLA34"""'], {}), "('DLA34')\n", (14425, 14434), True, 'import oneflow as flow\n'), ((235, 255), 'numpy.exp', 'np.exp', (['(-1 * sub_img)'], {}), '(-1 * sub_img)\n', (241, 255), True, 'import numpy as np\n'), ((2365, 2432), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias_weight'], {'data_format': 'self.data_format'}), '(output, bias_weight, data_format=self.data_format)\n', (2381, 2432), True, 'import oneflow as flow\n'), ((4801, 4844), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (4817, 4844), True, 'import oneflow as flow\n'), ((5442, 5457), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {}), '(x)\n', (5454, 5457), True, 'import oneflow as flow\n'), ((6216, 6246), 'oneflow.concat', 'flow.concat', (['[tree2, tree1]', '(1)'], {}), '([tree2, tree1], 1)\n', (6227, 6246), True, 'import oneflow as flow\n'), ((6277, 6307), 'oneflow.concat', 'flow.concat', (['[tree2, tree1]', '(1)'], {}), '([tree2, tree1], 1)\n', (6288, 6307), True, 'import oneflow as flow\n'), ((6544, 6599), 'oneflow.math.add', 'flow.math.add', (['x', 'children[0]'], {'name': "(name + '_root_neck')"}), "(x, children[0], name=name + '_root_neck')\n", (6557, 6599), True, 'import oneflow as flow\n'), ((6948, 7009), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['x', 'stride'], {'strides': 'stride', 'padding': '"""SAME"""'}), "(x, stride, strides=stride, padding='SAME')\n", (6966, 7009), True, 'import oneflow as flow\n'), ((10571, 10620), 'oneflow.math.add', 'flow.math.add', (['x[i]', 'x[i - 1]'], {'name': 'name_node_add'}), '(x[i], x[i - 1], name=name_node_add)\n', (10584, 10620), True, 'import oneflow as flow\n'), ((3828, 3916), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', ([], {'distribution': '"""random_normal"""', 'data_format': '"""NCHW"""'}), "(distribution='random_normal', data_format\n ='NCHW')\n", (3861, 3916), 
True, 'import oneflow as flow\n'), ((3937, 3965), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (3957, 3965), True, 'import oneflow as flow\n'), ((6376, 6408), 'oneflow.concat', 'flow.concat', (['[x, children[i]]', '(1)'], {}), '([x, children[i]], 1)\n', (6387, 6408), True, 'import oneflow as flow\n'), ((12450, 12483), 'oneflow.reshape', 'flow.reshape', (['x', '(x.shape[0], -1)'], {}), '(x, (x.shape[0], -1))\n', (12462, 12483), True, 'import oneflow as flow\n'), ((12591, 12634), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (12621, 12634), True, 'import oneflow as flow\n'), ((12683, 12707), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (12705, 12707), True, 'import oneflow as flow\n'), ((2244, 2275), 'oneflow.constant_initializer', 'flow.constant_initializer', (['bias'], {}), '(bias)\n', (2269, 2275), True, 'import oneflow as flow\n'), ((4674, 4702), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (4694, 4702), True, 'import oneflow as flow\n')]
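A hedged shape sketch for the detection heads: with `down_ratio = 4`, each map returned by `CenterNet` should have spatial size `H/4 x W/4`. The argparse namespace and the randomly initialized checkpoint below are stand-in assumptions for illustration only:

import argparse
import numpy as np
import oneflow as flow
import oneflow.typing as tp

fake_args = argparse.Namespace(wd=0.0)

@flow.global_function(type="predict")
def centernet_job(image: tp.Numpy.Placeholder((1, 3, 512, 512))):
    return CenterNet(image, fake_args, trainable=False, training=False)["hm"]

check_point = flow.train.CheckPoint()
check_point.init()  # random-initialize variables before the first call
hm = centernet_job(np.zeros((1, 3, 512, 512), np.float32)).get()  # expected shape (1, 80, 128, 128)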
#-*- coding:utf-8 -*-
"""
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

# Version:0.0.1
# Date:15/10/2020
# Author: <NAME> (<EMAIL>)
"""
import cv2
import numpy as np
from model import faceSeg
import oneflow as flow
import os
import time
from scipy.ndimage import median_filter
from glob import glob
import argparse

# arguments
def parse_args():
    parser = argparse.ArgumentParser(description='Face segmentation')
    # for oneflow
    parser.add_argument("--gpu_num_per_node", type=int, default=1, required=False)
    parser.add_argument("--model_load_dir", type=str, default='./faceseg_model', required=False, help="model load directory")
    parser.add_argument("--image_dir", type=str, default='./data/example/', required=False, help="demo examples directory")
    parser.add_argument("--img_height", type=int, default=256, required=False)
    parser.add_argument("--img_width", type=int, default=256, required=False)
    parser.add_argument("--jaccard_weight", type=float, default=1, required=False, help='jaccard weight for loss, a float between 0 and 1.')
    args = parser.parse_args()
    return args

# test config
args = parse_args()
func_config = flow.function_config()
func_config.default_data_type(flow.float)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

t_l = []
img_height = args.img_height
img_width = args.img_width
jaccard_weight = args.jaccard_weight
model_load_pth = args.model_load_dir
img_dir = args.image_dir
demo_dir = './demo/'  # output directory for the masked face images
img_pth = glob(img_dir + "*")
name_l = [img.split('/')[-1] for img in img_pth]
delta_t = []
smooth = True


def plt_mask(name, img_dir):
    # segment face using model
    img_path = img_dir + name  # path of image
    feature = faceSeg(img_path, model_load_pth)
    feature1 = np.squeeze(feature.numpy())  # reshape from (1,1,size,size) to (size,size)
    time_1 = time.time()
    # filter mask contour
    if smooth:
        feature1 = median_filter(feature1, size=5)
    time_2 = time.time()
    print(f'Smooth time: {time_2 - time_1} \n')
    t_l.append(time_2 - time_1)
    face = np.zeros((img_height, img_width, 3), dtype=np.uint8)  # face image, bgr image
    src = cv2.imread(img_path)  # read source image
    img_test1 = cv2.resize(src, (img_height, img_width))  # resize image
    image = img_test1

    # Mask replace speedup
    feature1 = feature1 > 0.  # extract mask
    face[feature1 == 1] = image[feature1 == 1]

    if not os.path.exists(demo_dir):
        os.mkdir(demo_dir)
    cv2.imwrite(demo_dir + name, face)


time_1 = time.time()
# load model parameters
check_point = flow.train.CheckPoint()
check_point.load(model_load_pth)
time_2 = time.time()
print(f'Model load time: {time_2 - time_1} \n')

for n in name_l:
    time_1 = time.time()
    plt_mask(n, img_dir)
    time_2 = time.time()
    print(f'time: {time_2 - time_1} \n')
    delta_t.append(time_2 - time_1)

print('Inf time for %i images: %.4fs | Average: %.4fs ' % (len(name_l), sum(np.array(t_l)), np.array(t_l).mean()))
print('Execution time for %i images: %.4fs | Average: %.4fs ' % (len(name_l), sum(np.array(delta_t)), np.mean(np.array(delta_t))))
[ "oneflow.function_config", "oneflow.train.CheckPoint" ]
[((1692, 1714), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (1712, 1714), True, 'import oneflow as flow\n'), ((1974, 1993), 'glob.glob', 'glob', (["(img_dir + '*')"], {}), "(img_dir + '*')\n", (1978, 1993), False, 'from glob import glob\n'), ((3040, 3051), 'time.time', 'time.time', ([], {}), '()\n', (3049, 3051), False, 'import time\n'), ((3090, 3113), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (3111, 3113), True, 'import oneflow as flow\n'), ((3156, 3167), 'time.time', 'time.time', ([], {}), '()\n', (3165, 3167), False, 'import time\n'), ((885, 941), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Face segmentation"""'}), "(description='Face segmentation')\n", (908, 941), False, 'import argparse\n'), ((2191, 2224), 'model.faceSeg', 'faceSeg', (['img_path', 'model_load_pth'], {}), '(img_path, model_load_pth)\n', (2198, 2224), False, 'from model import faceSeg\n'), ((2329, 2340), 'time.time', 'time.time', ([], {}), '()\n', (2338, 2340), False, 'import time\n'), ((2446, 2457), 'time.time', 'time.time', ([], {}), '()\n', (2455, 2457), False, 'import time\n'), ((2550, 2602), 'numpy.zeros', 'np.zeros', (['(img_height, img_width, 3)'], {'dtype': 'np.uint8'}), '((img_height, img_width, 3), dtype=np.uint8)\n', (2558, 2602), True, 'import numpy as np\n'), ((2638, 2658), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2648, 2658), False, 'import cv2\n'), ((2696, 2736), 'cv2.resize', 'cv2.resize', (['src', '(img_height, img_width)'], {}), '(src, (img_height, img_width))\n', (2706, 2736), False, 'import cv2\n'), ((2982, 3027), 'cv2.imwrite', 'cv2.imwrite', (["(model_dir + 'demo/' + name)", 'face'], {}), "(model_dir + 'demo/' + name, face)\n", (2993, 3027), False, 'import cv2\n'), ((3247, 3258), 'time.time', 'time.time', ([], {}), '()\n', (3256, 3258), False, 'import time\n'), ((3297, 3308), 'time.time', 'time.time', ([], {}), '()\n', (3306, 3308), False, 'import time\n'), ((2901, 2938), 'os.path.exists', 'os.path.exists', (["(model_dir + './demo/')"], {}), "(model_dir + './demo/')\n", (2915, 2938), False, 'import os\n'), ((2948, 2977), 'os.mkdir', 'os.mkdir', (["(model_dir + 'demo/')"], {}), "(model_dir + 'demo/')\n", (2956, 2977), False, 'import os\n'), ((3461, 3474), 'numpy.array', 'np.array', (['t_l'], {}), '(t_l)\n', (3469, 3474), True, 'import numpy as np\n'), ((3580, 3597), 'numpy.array', 'np.array', (['delta_t'], {}), '(delta_t)\n', (3588, 3597), True, 'import numpy as np\n'), ((3608, 3625), 'numpy.array', 'np.array', (['delta_t'], {}), '(delta_t)\n', (3616, 3625), True, 'import numpy as np\n'), ((3477, 3490), 'numpy.array', 'np.array', (['t_l'], {}), '(t_l)\n', (3485, 3490), True, 'import numpy as np\n')]
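The demo above composites the segmented face with a boolean mask (`face[feature1==1] = image[feature1==1]`). A minimal numpy sketch of that indexing pattern, with made-up shapes and values:

import numpy as np

# toy stand-ins for the demo's `image` and `feature1`
image = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)  # fake 2x2 BGR image
mask = np.array([[True, False], [False, True]])          # fake binary face mask
face = np.zeros_like(image)
face[mask] = image[mask]  # copy only the masked pixels; the background stays black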
import os import json import oneflow as flow from oneflow.utils.data import Dataset def read_data(task_name, split): fn = os.path.join(task_name, split, "{}.json".format(split)) input_ids = [] attention_mask = [] labels = [] with open(fn, "r") as f: result = json.load(f) for pack_data in result: input_ids.append(pack_data["input_ids"]) attention_mask.append(pack_data["attention_mask"]) labels.append(pack_data["labels"]) input_ids = flow.tensor(input_ids, dtype=flow.int32) attention_mask = flow.tensor(attention_mask, dtype=flow.int32) labels = flow.tensor(labels, dtype=flow.long) return input_ids, attention_mask, labels class AFQMCDataset(Dataset): def __init__(self, input_ids, attention_mask, labels): super(AFQMCDataset, self).__init__() self.input_ids = input_ids self.attention_mask = attention_mask self.labels = labels def __getitem__(self, key): return self.input_ids[key], self.attention_mask[key], self.labels[key] def __len__(self): return self.input_ids.shape[0]
[ "oneflow.tensor" ]
[((515, 555), 'oneflow.tensor', 'flow.tensor', (['input_ids'], {'dtype': 'flow.int32'}), '(input_ids, dtype=flow.int32)\n', (526, 555), True, 'import oneflow as flow\n'), ((577, 622), 'oneflow.tensor', 'flow.tensor', (['attention_mask'], {'dtype': 'flow.int32'}), '(attention_mask, dtype=flow.int32)\n', (588, 622), True, 'import oneflow as flow\n'), ((636, 672), 'oneflow.tensor', 'flow.tensor', (['labels'], {'dtype': 'flow.long'}), '(labels, dtype=flow.long)\n', (647, 672), True, 'import oneflow as flow\n'), ((290, 302), 'json.load', 'json.load', (['f'], {}), '(f)\n', (299, 302), False, 'import json\n')]
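The dataset class above follows the PyTorch-style `Dataset` protocol, so it can be consumed with oneflow's `DataLoader`. A sketch, assuming an AFQMC-style directory layout (the task and split names below are placeholders):

from oneflow.utils.data import DataLoader

input_ids, attention_mask, labels = read_data("afqmc", "train")  # hypothetical paths
dataset = AFQMCDataset(input_ids, attention_mask, labels)
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for ids, mask, label in loader:
    pass  # forward a batch through the model here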
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np from oneflow.compatible import single_client as flow from oneflow.compatible.single_client import typing as oft import unittest import os func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) def _check_unique(test_case, x, y, idx, count, num_unique): ref_y, ref_count = np.unique(x, return_counts=True) sorted_idx = np.argsort(ref_y) ref_y = ref_y[sorted_idx] ref_count = ref_count[sorted_idx] num_unique = num_unique.item() test_case.assertTrue(num_unique, np.size(ref_y)) y = y[0:num_unique] test_case.assertTrue(np.array_equal(y[idx], x)) sorted_idx = np.argsort(y) test_case.assertTrue(np.array_equal(ref_y, y[sorted_idx])) count = count[0:num_unique] test_case.assertTrue(np.array_equal(count[sorted_idx], ref_count)) def _run_test(test_case, x, dtype, device): @flow.global_function(function_config=func_config) def UniqueWithCountsJob(x: oft.Numpy.Placeholder(x.shape, dtype=dtype)): with flow.scope.placement(device, "0:0"): return flow.experimental.unique_with_counts(x) y, idx, count, num_unique = UniqueWithCountsJob(x).get() _check_unique( test_case, x, y.numpy(), idx.numpy(), count.numpy(), num_unique.numpy() ) @flow.unittest.skip_unless_1n1d() class TestUnique(flow.unittest.TestCase): @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_unique_with_counts_int(test_case): x = np.asarray(list(range(32)) * 2).astype(np.int32) np.random.shuffle(x) _run_test(test_case, x, flow.int32, "gpu") @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_unique_with_counts_float(test_case): x = np.asarray(list(range(32)) * 2).astype(np.float32) np.random.shuffle(x) _run_test(test_case, x, flow.float32, "gpu") @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_unique_with_counts_random_gpu(test_case): x = np.random.randint(0, 32, 1024).astype(np.int32) np.random.shuffle(x) _run_test(test_case, x, flow.int32, "gpu") def test_unique_with_counts_random_cpu(test_case): x = np.random.randint(0, 32, 1024).astype(np.int32) np.random.shuffle(x) _run_test(test_case, x, flow.int32, "cpu") if __name__ == "__main__": unittest.main()
[ "oneflow.compatible.single_client.unittest.skip_unless_1n1d", "oneflow.compatible.single_client.FunctionConfig", "oneflow.compatible.single_client.scope.placement", "oneflow.compatible.single_client.experimental.unique_with_counts", "oneflow.compatible.single_client.typing.Numpy.Placeholder", "oneflow.compatible.single_client.global_function" ]
[((762, 783), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (781, 783), True, 'from oneflow.compatible import single_client as flow\n'), ((1865, 1897), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1895, 1897), True, 'from oneflow.compatible import single_client as flow\n'), ((911, 943), 'numpy.unique', 'np.unique', (['x'], {'return_counts': '(True)'}), '(x, return_counts=True)\n', (920, 943), True, 'import numpy as np\n'), ((961, 978), 'numpy.argsort', 'np.argsort', (['ref_y'], {}), '(ref_y)\n', (971, 978), True, 'import numpy as np\n'), ((1228, 1241), 'numpy.argsort', 'np.argsort', (['y'], {}), '(y)\n', (1238, 1241), True, 'import numpy as np\n'), ((1459, 1508), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1479, 1508), True, 'from oneflow.compatible import single_client as flow\n'), ((2990, 3005), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3003, 3005), False, 'import unittest\n'), ((1119, 1133), 'numpy.size', 'np.size', (['ref_y'], {}), '(ref_y)\n', (1126, 1133), True, 'import numpy as np\n'), ((1184, 1209), 'numpy.array_equal', 'np.array_equal', (['y[idx]', 'x'], {}), '(y[idx], x)\n', (1198, 1209), True, 'import numpy as np\n'), ((1267, 1303), 'numpy.array_equal', 'np.array_equal', (['ref_y', 'y[sorted_idx]'], {}), '(ref_y, y[sorted_idx])\n', (1281, 1303), True, 'import numpy as np\n'), ((1362, 1406), 'numpy.array_equal', 'np.array_equal', (['count[sorted_idx]', 'ref_count'], {}), '(count[sorted_idx], ref_count)\n', (1376, 1406), True, 'import numpy as np\n'), ((2137, 2157), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (2154, 2157), True, 'import numpy as np\n'), ((1961, 1995), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1970, 1995), False, 'import os\n'), ((2411, 2431), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (2428, 2431), True, 'import numpy as np\n'), ((2231, 2265), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (2240, 2265), False, 'import os\n'), ((2689, 2709), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (2706, 2709), True, 'import numpy as np\n'), ((2507, 2541), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (2516, 2541), False, 'import os\n'), ((2885, 2905), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (2902, 2905), True, 'import numpy as np\n'), ((1540, 1583), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x.shape'], {'dtype': 'dtype'}), '(x.shape, dtype=dtype)\n', (1561, 1583), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1599, 1634), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device', '"""0:0"""'], {}), "(device, '0:0')\n", (1619, 1634), True, 'from oneflow.compatible import single_client as flow\n'), ((1655, 1694), 'oneflow.compatible.single_client.experimental.unique_with_counts', 'flow.experimental.unique_with_counts', (['x'], {}), '(x)\n', (1691, 1694), True, 'from oneflow.compatible import single_client as flow\n'), ((2633, 2663), 'numpy.random.randint', 'np.random.randint', (['(0)', '(32)', '(1024)'], {}), '(0, 32, 1024)\n', (2650, 2663), True, 'import numpy as np\n'), ((2829, 2859), 
'numpy.random.randint', 'np.random.randint', (['(0)', '(32)', '(1024)'], {}), '(0, 32, 1024)\n', (2846, 2859), True, 'import numpy as np\n')]
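The reference side of `_check_unique` is plain numpy: `np.unique` with `return_counts=True` yields the sorted unique values and their multiplicities, which the test compares against the op's outputs (the extra argsort is only a safeguard, since `np.unique` already returns sorted values). A standalone illustration:

import numpy as np

x = np.array([3, 1, 3, 2, 1, 1])
ref_y, ref_count = np.unique(x, return_counts=True)
print(ref_y)      # [1 2 3]
print(ref_count)  # [3 1 2]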
from models.seq_seq_oneflow import *
from utils.utils_oneflow import *

import oneflow.optim as optim
import oneflow.nn as nn
import time
import random
import argparse

# refer to: https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html


def _parse_args():
    parser = argparse.ArgumentParser("flags for train seq2seq")
    parser.add_argument(
        "--device", type=str, default="cuda", help="device",
    )
    parser.add_argument(
        "--save_encoder_checkpoint_path",
        type=str,
        default="./saving_model_oneflow/encoder/",
        help="save checkpoint encoder dir",
    )
    parser.add_argument(
        "--save_decoder_checkpoint_path",
        type=str,
        default="./saving_model_oneflow/decoder/",
        help="save checkpoint decoder dir",
    )
    parser.add_argument("--hidden_size", type=int, default=256, help="hidden size")
    parser.add_argument("--n_iters", type=int, default=75000, help="num of iters")
    parser.add_argument("--lr", type=float, default=0.01, help="learning rate")
    parser.add_argument(
        "--drop", type=float, default=0.1, help="the dropout of decoder_embedding"
    )
    return parser.parse_args()


def train(
    input_tensor,
    target_tensor,
    encoder,
    decoder,
    encoder_optimizer,
    decoder_optimizer,
    criterion,
    max_length=MAX_LENGTH,
):
    encoder_hidden = encoder.init_Hidden().to(device)
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)
    encoder_outputs = []
    loss = 0
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
        encoder_outputs.append(encoder_output[0])
    if len(encoder_outputs) != MAX_LENGTH:
        for _ in range(MAX_LENGTH - len(encoder_outputs)):
            encoder_outputs.append(flow.zeros((1, 256)).to(device))
    encoder_outputs = flow.cat(encoder_outputs, dim=0)
    decoder_input = flow.tensor([[SOS_token]]).to(device)
    decoder_hidden = encoder_hidden
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]
    else:
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            _, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input
            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.numpy() == EOS_token:
                break
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    return loss.numpy() / target_length


def trainIters(
    encoder,
    decoder,
    n_iters,
    pairs,
    input_lang,
    output_lang,
    print_every=1000,
    plot_every=100,
    learning_rate=0.01,
):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [
        tensorsFromPair(random.choice(pairs), input_lang, output_lang)
        for _ in range(n_iters)
    ]
    criterion = nn.NLLLoss()
    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]
        loss = train(
            input_tensor,
            target_tensor,
            encoder,
            decoder,
            encoder_optimizer,
            decoder_optimizer,
            criterion,
        )
        print_loss_total += loss
        plot_loss_total += loss
        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print(
                "%s (%d %d%%) %.4f"
                % (
                    timeSince(start, iter / n_iters),
                    iter,
                    iter / n_iters * 100,
                    print_loss_avg,
                )
            )
        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0
    showPlot(plot_losses)


def main(args):
    # train() reads the module-level name `device`; declare it global here so the
    # assignment below is visible inside train() and trainIters()
    global device
    device = args.device
    # pre
    input_lang, output_lang, pairs = prepareData("eng", "fra", True)
    # training
    hidden_size = args.hidden_size
    encoder = EncoderRNN_oneflow(input_lang.n_words, hidden_size).to(device)
    attn_decoder = AttnDecoderRNN_oneflow(
        hidden_size, output_lang.n_words, dropout_p=args.drop
    ).to(device)
    trainIters(
        encoder,
        attn_decoder,
        args.n_iters,
        pairs,
        input_lang,
        output_lang,
        print_every=5000,
        plot_every=100,
        learning_rate=args.lr,
    )
    # saving model...
    flow.save(encoder.state_dict(), args.save_encoder_checkpoint_path)
    flow.save(attn_decoder.state_dict(), args.save_decoder_checkpoint_path)


if __name__ == "__main__":
    args = _parse_args()
    main(args)
[ "oneflow.nn.NLLLoss" ]
[((292, 342), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for train seq2seq"""'], {}), "('flags for train seq2seq')\n", (315, 342), False, 'import argparse\n'), ((3295, 3306), 'time.time', 'time.time', ([], {}), '()\n', (3304, 3306), False, 'import time\n'), ((3727, 3739), 'oneflow.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (3737, 3739), True, 'import oneflow.nn as nn\n'), ((2144, 2159), 'random.random', 'random.random', ([], {}), '()\n', (2157, 2159), False, 'import random\n'), ((3626, 3646), 'random.choice', 'random.choice', (['pairs'], {}), '(pairs)\n', (3639, 3646), False, 'import random\n')]
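The `train()` function above gates each decoding step on a teacher-forcing coin flip. In isolation (`teacher_forcing_ratio` comes from the wildcard-imported utils module; 0.5 is just an illustrative value):

import random

teacher_forcing_ratio = 0.5  # illustrative; the real value comes from the utils module
use_teacher_forcing = random.random() < teacher_forcing_ratio
# True  -> feed the ground-truth token target_tensor[di] as the next decoder input
# False -> feed the decoder's own top-1 prediction, detached from the graph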
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow from oneflow.cuda.type_tensor import * def is_available() -> bool: r"""Returns a bool indicating if CUDA is currently available.""" # This function never throws and returns 0 if driver is missing or can't # be initialized return device_count() > 0 def device_count() -> int: r"""Returns the number of GPUs available.""" return flow._oneflow_internal.CudaGetDeviceCount() def current_device() -> int: r"""Returns local rank as device index.""" return flow._oneflow_internal.GetCudaDeviceIndex()
[ "oneflow._oneflow_internal.CudaGetDeviceCount", "oneflow._oneflow_internal.GetCudaDeviceIndex" ]
[((969, 1012), 'oneflow._oneflow_internal.CudaGetDeviceCount', 'flow._oneflow_internal.CudaGetDeviceCount', ([], {}), '()\n', (1010, 1012), True, 'import oneflow as flow\n'), ((1102, 1145), 'oneflow._oneflow_internal.GetCudaDeviceIndex', 'flow._oneflow_internal.GetCudaDeviceIndex', ([], {}), '()\n', (1143, 1145), True, 'import oneflow as flow\n')]
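A short sketch of how the helpers defined in that record are typically used to pick a device:

import oneflow as flow

device = "cuda" if flow.cuda.is_available() else "cpu"
x = flow.randn(2, 3).to(device)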
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow from oneflow.framework.docstr.utils import add_docstr add_docstr( oneflow.bmm, """ Performs a batch matrix-matrix product of matrices stored in input and mat2. `input` and `mat2` must be 3-D tensors each containing the same number of matrices. If input is a (b x n x m) tensor, mat2 is a (b x m x p) tensor, out will be a (b x n x p) tensor. Args: input(oneflow.Tensor): the first batch of matrices to be multiplied mat2(oneflow.Tensor): the second batch of matrices to be multiplied For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> input1 = flow.randn(10, 3, 4) >>> input2 = flow.randn(10, 4, 5) >>> of_out = flow.bmm(input1, input2) >>> of_out.shape oneflow.Size([10, 3, 5]) """, )
[ "oneflow.framework.docstr.utils.add_docstr" ]
[((660, 1443), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.bmm', '"""\n Performs a batch matrix-matrix product of matrices stored in input and mat2.\n\n `input` and `mat2` must be 3-D tensors each containing the same number of matrices.\n\n If input is a (b x n x m) tensor, mat2 is a (b x m x p) tensor, out will be a (b x n x p) tensor.\n\n Args:\n input(oneflow.Tensor): the first batch of matrices to be multiplied\n mat2(oneflow.Tensor): the second batch of matrices to be multiplied\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input1 = flow.randn(10, 3, 4)\n >>> input2 = flow.randn(10, 4, 5)\n >>> of_out = flow.bmm(input1, input2)\n >>> of_out.shape\n oneflow.Size([10, 3, 5])\n """'], {}), '(oneflow.bmm,\n """\n Performs a batch matrix-matrix product of matrices stored in input and mat2.\n\n `input` and `mat2` must be 3-D tensors each containing the same number of matrices.\n\n If input is a (b x n x m) tensor, mat2 is a (b x m x p) tensor, out will be a (b x n x p) tensor.\n\n Args:\n input(oneflow.Tensor): the first batch of matrices to be multiplied\n mat2(oneflow.Tensor): the second batch of matrices to be multiplied\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input1 = flow.randn(10, 3, 4)\n >>> input2 = flow.randn(10, 4, 5)\n >>> of_out = flow.bmm(input1, input2)\n >>> of_out.shape\n oneflow.Size([10, 3, 5])\n """\n )\n', (670, 1443), False, 'from oneflow.framework.docstr.utils import add_docstr\n')]
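The shape contract documented above can be sanity-checked against an einsum equivalent: for a (b, n, m) batch times a (b, m, p) batch, bmm performs the contraction "bnm,bmp->bnp". A numpy sketch:

import numpy as np

a = np.random.randn(10, 3, 4)
b = np.random.randn(10, 4, 5)
out = np.einsum("bnm,bmp->bnp", a, b)  # same contraction as flow.bmm
print(out.shape)  # (10, 3, 5)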
from .utils import get_dropout_mask

import math
from typing import Optional, List, Tuple

import oneflow as flow


class LstmCellWithProjection(flow.nn.Module):
    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        cell_size: int,
        go_forward: bool = True,
        recurrent_dropout_probability: float = 0.0,
        memory_cell_clip_value: Optional[float] = None,
        state_projection_clip_value: Optional[float] = None,
    ) -> None:
        super(LstmCellWithProjection, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.cell_size = cell_size
        self.go_forward = go_forward
        self.state_projection_clip_value = state_projection_clip_value
        self.memory_cell_clip_value = memory_cell_clip_value
        self.recurrent_dropout_probability = recurrent_dropout_probability
        self.input_linearity = flow.nn.Linear(input_size, 4 * cell_size, bias=False)
        self.state_linearity = flow.nn.Linear(hidden_size, 4 * cell_size, bias=True)
        self.state_projection = flow.nn.Linear(cell_size, hidden_size, bias=False)
        self.reset_parameters()

    def reset_parameters(self):
        # TODO: Modify after oneflow support.
        # block_orthogonal(self.input_linearity.weight.data, [self.cell_size, self.input_size])
        # block_orthogonal(self.state_linearity.weight.data, [self.cell_size, self.hidden_size])
        stdv = 1.0 / math.sqrt(self.hidden_size)
        self.input_linearity.weight.uniform_(-stdv, stdv)
        self.state_linearity.weight.uniform_(-stdv, stdv)
        self.state_linearity.bias.data.fill_(0.0)
        self.state_linearity.bias.data[self.cell_size : 2 * self.cell_size].fill_(1.0)

    def forward(
        self,
        inputs,
        batch_lengths: List[int],
        initial_state: Optional[Tuple[flow.Tensor, flow.Tensor]] = None,
    ):
        batch_size = inputs.size()[0]
        total_timesteps = inputs.size()[1]
        output_accumulator = flow.Tensor(
            flow.zeros(
                batch_size, total_timesteps, self.hidden_size, dtype=inputs.dtype
            ).to(inputs.device)
        )
        if initial_state is None:
            full_batch_previous_memory = flow.Tensor(
                flow.zeros(batch_size, self.cell_size, dtype=inputs.dtype).to(
                    inputs.device
                )
            )
            full_batch_previous_state = flow.Tensor(
                flow.zeros(batch_size, self.hidden_size, dtype=inputs.dtype).to(
                    inputs.device
                )
            )
        else:
            full_batch_previous_state = initial_state[0].squeeze(0)
            full_batch_previous_memory = initial_state[1].squeeze(0)
        current_length_index = batch_size - 1 if self.go_forward else 0
        if self.recurrent_dropout_probability > 0.0 and self.training:
            dropout_mask = get_dropout_mask(
                self.recurrent_dropout_probability, full_batch_previous_state
            )
        else:
            dropout_mask = None
        for timestep in range(total_timesteps):
            index = timestep if self.go_forward else total_timesteps - timestep - 1
            if self.go_forward:
                while batch_lengths[current_length_index] <= index:
                    current_length_index -= 1
            else:
                # batch_lengths is a plain Python list, so use len() rather than .size()
                while (
                    current_length_index < (len(batch_lengths) - 1)
                    and batch_lengths[current_length_index + 1] > index
                ):
                    current_length_index += 1
            previous_memory = full_batch_previous_memory[
                0 : current_length_index + 1
            ].clone()
            previous_state = full_batch_previous_state[
                0 : current_length_index + 1
            ].clone()
            timestep_input = inputs[0 : current_length_index + 1, index]
            projected_input = self.input_linearity(timestep_input)
            projected_state = self.state_linearity(previous_state)
            input_gate = flow.sigmoid(
                projected_input[:, (0 * self.cell_size) : (1 * self.cell_size)]
                + projected_state[:, (0 * self.cell_size) : (1 * self.cell_size)]
            )
            forget_gate = flow.sigmoid(
                projected_input[:, (1 * self.cell_size) : (2 * self.cell_size)]
                + projected_state[:, (1 * self.cell_size) : (2 * self.cell_size)]
            )
            memory_init = flow.tanh(
                projected_input[:, (2 * self.cell_size) : (3 * self.cell_size)]
                + projected_state[:, (2 * self.cell_size) : (3 * self.cell_size)]
            )
            output_gate = flow.sigmoid(
                projected_input[:, (3 * self.cell_size) : (4 * self.cell_size)]
                + projected_state[:, (3 * self.cell_size) : (4 * self.cell_size)]
            )
            memory = input_gate * memory_init + forget_gate * previous_memory
            if self.memory_cell_clip_value:
                memory = flow.clamp(
                    memory, -self.memory_cell_clip_value, self.memory_cell_clip_value
                )
            pre_projection_timestep_output = output_gate * flow.tanh(memory)
            timestep_output = self.state_projection(pre_projection_timestep_output)
            if self.state_projection_clip_value:
                timestep_output = flow.clamp(
                    timestep_output,
                    -self.state_projection_clip_value,
                    self.state_projection_clip_value,
                )
            if dropout_mask is not None:
                timestep_output = (
                    timestep_output * dropout_mask[0 : current_length_index + 1]
                )
            full_batch_previous_memory = flow.Tensor(
                full_batch_previous_memory.data.clone()
            )
            full_batch_previous_state = flow.Tensor(
                full_batch_previous_state.data.clone()
            )
            full_batch_previous_memory[0 : current_length_index + 1] = memory
            full_batch_previous_state[0 : current_length_index + 1] = timestep_output
            output_accumulator[0 : current_length_index + 1, index] = timestep_output
        final_state = (
            full_batch_previous_state.unsqueeze(0),
            full_batch_previous_memory.unsqueeze(0),
        )
        return output_accumulator, final_state
[ "oneflow.nn.Linear", "oneflow.tanh", "oneflow.sigmoid", "oneflow.clamp", "oneflow.zeros" ]
[((928, 981), 'oneflow.nn.Linear', 'flow.nn.Linear', (['input_size', '(4 * cell_size)'], {'bias': '(False)'}), '(input_size, 4 * cell_size, bias=False)\n', (942, 981), True, 'import oneflow as flow\n'), ((1013, 1066), 'oneflow.nn.Linear', 'flow.nn.Linear', (['hidden_size', '(4 * cell_size)'], {'bias': '(True)'}), '(hidden_size, 4 * cell_size, bias=True)\n', (1027, 1066), True, 'import oneflow as flow\n'), ((1100, 1150), 'oneflow.nn.Linear', 'flow.nn.Linear', (['cell_size', 'hidden_size'], {'bias': '(False)'}), '(cell_size, hidden_size, bias=False)\n', (1114, 1150), True, 'import oneflow as flow\n'), ((1476, 1503), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (1485, 1503), False, 'import math\n'), ((4115, 4250), 'oneflow.sigmoid', 'flow.sigmoid', (['(projected_input[:, 0 * self.cell_size:1 * self.cell_size] +\n projected_state[:, 0 * self.cell_size:1 * self.cell_size])'], {}), '(projected_input[:, 0 * self.cell_size:1 * self.cell_size] +\n projected_state[:, 0 * self.cell_size:1 * self.cell_size])\n', (4127, 4250), True, 'import oneflow as flow\n'), ((4331, 4466), 'oneflow.sigmoid', 'flow.sigmoid', (['(projected_input[:, 1 * self.cell_size:2 * self.cell_size] +\n projected_state[:, 1 * self.cell_size:2 * self.cell_size])'], {}), '(projected_input[:, 1 * self.cell_size:2 * self.cell_size] +\n projected_state[:, 1 * self.cell_size:2 * self.cell_size])\n', (4343, 4466), True, 'import oneflow as flow\n'), ((4547, 4679), 'oneflow.tanh', 'flow.tanh', (['(projected_input[:, 2 * self.cell_size:3 * self.cell_size] +\n projected_state[:, 2 * self.cell_size:3 * self.cell_size])'], {}), '(projected_input[:, 2 * self.cell_size:3 * self.cell_size] +\n projected_state[:, 2 * self.cell_size:3 * self.cell_size])\n', (4556, 4679), True, 'import oneflow as flow\n'), ((4760, 4895), 'oneflow.sigmoid', 'flow.sigmoid', (['(projected_input[:, 3 * self.cell_size:4 * self.cell_size] +\n projected_state[:, 3 * self.cell_size:4 * self.cell_size])'], {}), '(projected_input[:, 3 * self.cell_size:4 * self.cell_size] +\n projected_state[:, 3 * self.cell_size:4 * self.cell_size])\n', (4772, 4895), True, 'import oneflow as flow\n'), ((5098, 5175), 'oneflow.clamp', 'flow.clamp', (['memory', '(-self.memory_cell_clip_value)', 'self.memory_cell_clip_value'], {}), '(memory, -self.memory_cell_clip_value, self.memory_cell_clip_value)\n', (5108, 5175), True, 'import oneflow as flow\n'), ((5274, 5291), 'oneflow.tanh', 'flow.tanh', (['memory'], {}), '(memory)\n', (5283, 5291), True, 'import oneflow as flow\n'), ((5460, 5561), 'oneflow.clamp', 'flow.clamp', (['timestep_output', '(-self.state_projection_clip_value)', 'self.state_projection_clip_value'], {}), '(timestep_output, -self.state_projection_clip_value, self.\n state_projection_clip_value)\n', (5470, 5561), True, 'import oneflow as flow\n'), ((2056, 2133), 'oneflow.zeros', 'flow.zeros', (['batch_size', 'total_timesteps', 'self.hidden_size'], {'dtype': 'inputs.dtype'}), '(batch_size, total_timesteps, self.hidden_size, dtype=inputs.dtype)\n', (2066, 2133), True, 'import oneflow as flow\n'), ((2297, 2355), 'oneflow.zeros', 'flow.zeros', (['batch_size', 'self.cell_size'], {'dtype': 'inputs.dtype'}), '(batch_size, self.cell_size, dtype=inputs.dtype)\n', (2307, 2355), True, 'import oneflow as flow\n'), ((2495, 2555), 'oneflow.zeros', 'flow.zeros', (['batch_size', 'self.hidden_size'], {'dtype': 'inputs.dtype'}), '(batch_size, self.hidden_size, dtype=inputs.dtype)\n', (2505, 2555), True, 'import oneflow as flow\n')]
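The point of the projection in the cell above is that the memory width (cell_size) is decoupled from the output width (hidden_size). A made-up forward pass, assuming the record's module and its `.utils` dependency are importable:

import oneflow as flow

cell = LstmCellWithProjection(input_size=8, hidden_size=4, cell_size=16)
cell.eval()                  # keeps recurrent dropout off
x = flow.randn(3, 5, 8)      # (batch, time, input_size)
lengths = [5, 4, 2]          # per-example lengths, sorted descending
out, (h, c) = cell(x, lengths)
print(out.shape)             # (3, 5, 4)  -- hidden_size, not cell_size
print(c.shape)               # (1, 3, 16) -- the unprojected memory cell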
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow from oneflow.framework.docstr.utils import add_docstr add_docstr( oneflow.F.prelu, r""" prelu(x: Tensor, alpha: Tensor) -> Tensor Applies the element-wise function: .. math:: prelu(x) = max(0,x) + alpha * min(0,x) For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = flow.Tensor(np.asarray([[[[1, -2], [3, 4]]]]), dtype=flow.float32) >>> alpha = flow.nn.Parameter(flow.Tensor(1, 1, 1).fill_(0.25)) >>> print(flow.F.prelu(x, alpha).numpy()) [[[[ 1. -0.5] [ 3. 4. ]]]] See :class:`~oneflow.nn.PReLU` for more details. """, ) add_docstr( oneflow.F.gelu, r""" gelu(x: Tensor) -> Tensor The equation is: .. math:: out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3}))) For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> out = flow.F.gelu(input) >>> out tensor([-0.1543, 0. , 0.3457], dtype=oneflow.float32) See :class:`~oneflow.nn.GELU` for more details. """, ) add_docstr( oneflow.F.softmax, r""" softmax(x: Tensor) -> Tensor Softmax is defined as: .. math:: \text{Softmax}(x_{i}) = \frac{\\exp(x_i)}{\sum_j \exp(x_j)} See :class:`~oneflow.nn.Softmax` for more details. """, ) add_docstr( oneflow.F.softplus, r""" softplus(x: Tensor) -> Tensor Applies the element-wise function: .. math:: \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) See :class:`~oneflow.nn.Softplus` for more details. """, ) add_docstr( oneflow.F.tanh, r""" tanh(x: Tensor) -> Tensor The equation is: .. math:: out = \frac{e^x-e^{-x}}{e^x+e^{-x}} See :class:`~oneflow.nn.Tanh` for more details. """, ) add_docstr( oneflow.F.log_sigmoid, r""" log_sigmoid(x: Tensor) -> Tensor Applies the element-wise function: .. math:: \text{log_sigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right) For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> out = flow.F.log_sigmoid(input) >>> out tensor([-0.9741, -0.6931, -0.4741], dtype=oneflow.float32) See :class:`~oneflow.nn.LogSigmoid` for more details. """, ) add_docstr( oneflow.F.softsign, r""" softsign(x: Tensor) -> Tensor The formula is: .. math:: softsign(x) = \frac{x}{1 + |x|} For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = np.array([1, 2, 3]).astype(np.float32) >>> input = flow.Tensor(x) >>> out = flow.F.softsign(input) >>> out tensor([0.5 , 0.6667, 0.75 ], dtype=oneflow.float32) See :class:`~oneflow.nn.Softsign` for more details. """, ) add_docstr( oneflow.F.silu, r""" silu(x: Tensor) -> Tensor The formula is: .. math:: \text{silu}(x) = x * sigmoid(x) For example: .. 
code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = np.array([1, 2, 3]).astype(np.float32) >>> input = flow.Tensor(x) >>> out = flow.F.silu(input) >>> out tensor([0.7311, 1.7616, 2.8577], dtype=oneflow.float32) See :class:`~oneflow.nn.SiLU` for more details. """, ) add_docstr( oneflow.F.mish, r""" mish(x: Tensor) -> Tensor Applies the element-wise function: .. math:: \text{mish}(x) = x * \text{tanh}(\text{softplus}(x)) For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = np.array([1, 2, 3]).astype(np.float32) >>> input = flow.Tensor(x) >>> out = flow.F.mish(input) >>> out tensor([0.8651, 1.944 , 2.9865], dtype=oneflow.float32) See : class:`~oneflow.nn.Mish` for more details. """, ) add_docstr( oneflow.F.relu, r""" relu(x: Tensor, inplace: bool =False) -> Tensor Applies the rectified linear unit function element-wise. See :class:`~oneflow.nn.ReLU` for more details. """, ) add_docstr( oneflow.F.hardsigmoid, r""" hardsigmoid(x: Tensor)-> Tensor Applies the element-wise function .. math:: \text{Hardsigmoid}(x) = \begin{cases} 0 & \text{if~} x \le -3, \\ 1 & \text{if~} x \ge +3, \\ x / 6 + 1 / 2 & \text{otherwise} \end{cases} Args: inplace: If set to ``True``, will do this operation in-place. Default: ``False`` See :class:`~oneflow.nn.Hardsigmoid` for more details. """, ) add_docstr( oneflow.F.hardswish, r""" hardswish(x: Tensor)-> Tensor Applies the hardswish function, element-wise, as described in the paper: `Searching for MobileNetV3`_. .. math:: \text{Hardswish}(x) = \begin{cases} 0 & \text{if~} x \le -3, \\ x & \text{if~} x \ge +3, \\ x \cdot (x + 3) /6 & \text{otherwise} \end{cases} See :class:`~oneflow.nn.Hardswish` for more details. .. _`Searching for MobileNetV3`: https://arxiv.org/abs/1905.02244 """, ) add_docstr( oneflow.F.sigmoid, r""" sigmoid(input) -> Tensor Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}` See :class:`~oneflow.nn.Sigmoid` for more details. For examples: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = flow.Tensor(np.array([0.81733328, 0.43621480, 0.10351428])) >>> input = flow.Tensor(x) >>> out = flow.nn.functional.sigmoid(input) >>> out tensor([0.6937, 0.6074, 0.5259], dtype=oneflow.float32) """, ) add_docstr( oneflow.F.hardtanh, r""" hardtanh(input, min_val=-1., max_val=1.) -> Tensor Applies the HardTanh function element-wise. See :class:`~oneflow.nn.Hardtanh` for more details. """, ) add_docstr( oneflow.F.leaky_relu, r""" leaky_relu(x: Tensor, alpha :Float) -> Tensor Applies element-wise, :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)` See :class:`~oneflow.nn.LeakyReLU` for more details. """, ) add_docstr( oneflow.F.elu, r""" elu(x: Tensor, alpha :Float) -> Tensor Applies element-wise, :math:`\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))`. See :class:`~oneflow.nn.ELU` for more details. For examples: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> out = flow.nn.functional.elu(input, alpha=1.0) >>> out tensor([-0.3935, 0. , 0.5 ], dtype=oneflow.float32) """, ) add_docstr( oneflow.F.selu, r""" selu(x: Tensor) -> Tensor Applies element-wise, :math:`\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`, with :math:`\alpha=1.6732632423543772848170429916717` and :math:`scale=1.0507009873554804934193349852946`. See :class:`~oneflow.nn.SELU` for more details. 
For examples: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = np.array([1, 2, 3]).astype(np.float32) >>> input = flow.Tensor(x) >>> out = flow.nn.functional.selu(input) >>> out tensor([1.0507, 2.1014, 3.1521], dtype=oneflow.float32) """, )
[ "oneflow.framework.docstr.utils.add_docstr" ]
[((660, 1291), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.prelu', '"""\n prelu(x: Tensor, alpha: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. math::\n prelu(x) = max(0,x) + alpha * min(0,x) \n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.Tensor(np.asarray([[[[1, -2], [3, 4]]]]), dtype=flow.float32)\n >>> alpha = flow.nn.Parameter(flow.Tensor(1, 1, 1).fill_(0.25))\n >>> print(flow.F.prelu(x, alpha).numpy())\n [[[[ 1. -0.5]\n [ 3. 4. ]]]]\n \n See\n :class:`~oneflow.nn.PReLU` for more details.\n \n """'], {}), '(oneflow.F.prelu,\n """\n prelu(x: Tensor, alpha: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. math::\n prelu(x) = max(0,x) + alpha * min(0,x) \n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.Tensor(np.asarray([[[[1, -2], [3, 4]]]]), dtype=flow.float32)\n >>> alpha = flow.nn.Parameter(flow.Tensor(1, 1, 1).fill_(0.25))\n >>> print(flow.F.prelu(x, alpha).numpy())\n [[[[ 1. -0.5]\n [ 3. 4. ]]]]\n \n See\n :class:`~oneflow.nn.PReLU` for more details.\n \n """\n )\n', (670, 1291), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1297, 1901), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.gelu', '"""\n gelu(x: Tensor) -> Tensor \n\n The equation is:\n\n .. math::\n out = 0.5 * x * (1 + tanh(\\\\sqrt{\\\\frac{2}{\\\\pi}} * (x + 0.044715x^{3})))\n \n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n \n >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)\n >>> input = flow.Tensor(x)\n\n >>> out = flow.F.gelu(input)\n >>> out\n tensor([-0.1543, 0. , 0.3457], dtype=oneflow.float32)\n\n See \n :class:`~oneflow.nn.GELU` for more details.\n \n """'], {}), '(oneflow.F.gelu,\n """\n gelu(x: Tensor) -> Tensor \n\n The equation is:\n\n .. math::\n out = 0.5 * x * (1 + tanh(\\\\sqrt{\\\\frac{2}{\\\\pi}} * (x + 0.044715x^{3})))\n \n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n \n >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)\n >>> input = flow.Tensor(x)\n\n >>> out = flow.F.gelu(input)\n >>> out\n tensor([-0.1543, 0. , 0.3457], dtype=oneflow.float32)\n\n See \n :class:`~oneflow.nn.GELU` for more details.\n \n """\n )\n', (1307, 1901), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1904, 2166), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.softmax', '"""\n softmax(x: Tensor) -> Tensor \n\n Softmax is defined as:\n\n .. math::\n \\\\text{Softmax}(x_{i}) = \\\\frac{\\\\\\\\exp(x_i)}{\\\\sum_j \\\\exp(x_j)}\n \n See :class:`~oneflow.nn.Softmax` for more details.\n """'], {}), '(oneflow.F.softmax,\n """\n softmax(x: Tensor) -> Tensor \n\n Softmax is defined as:\n\n .. math::\n \\\\text{Softmax}(x_{i}) = \\\\frac{\\\\\\\\exp(x_i)}{\\\\sum_j \\\\exp(x_j)}\n \n See :class:`~oneflow.nn.Softmax` for more details.\n """\n )\n', (1914, 2166), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2164, 2450), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.softplus', '"""\n softplus(x: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. 
math::\n \\\\text{Softplus}(x) = \\\\frac{1}{\\\\beta} * \\\\log(1 + \\\\exp(\\\\beta * x)) \n \n See :class:`~oneflow.nn.Softplus` for more details.\n """'], {}), '(oneflow.F.softplus,\n """\n softplus(x: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. math::\n \\\\text{Softplus}(x) = \\\\frac{1}{\\\\beta} * \\\\log(1 + \\\\exp(\\\\beta * x)) \n \n See :class:`~oneflow.nn.Softplus` for more details.\n """\n )\n', (2174, 2450), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2449, 2664), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.tanh', '"""\n tanh(x: Tensor) -> Tensor \n\n The equation is:\n\n .. math::\n\n out = \\\\frac{e^x-e^{-x}}{e^x+e^{-x}}\n\n See :class:`~oneflow.nn.Tanh` for more details.\n """'], {}), '(oneflow.F.tanh,\n """\n tanh(x: Tensor) -> Tensor \n\n The equation is:\n\n .. math::\n\n out = \\\\frac{e^x-e^{-x}}{e^x+e^{-x}}\n\n See :class:`~oneflow.nn.Tanh` for more details.\n """\n )\n', (2459, 2664), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2667, 3320), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.log_sigmoid', '"""\n log_sigmoid(x: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. math::\n \\\\text{log_sigmoid}(x) = \\\\log\\\\left(\\\\frac{ 1 }{ 1 + \\\\exp(-x)}\\\\right)\n \n For example:\n\n .. code-block:: python\n\n\n >>> import numpy as np\n >>> import oneflow as flow\n \n >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)\n >>> input = flow.Tensor(x) \n \n >>> out = flow.F.log_sigmoid(input)\n >>> out\n tensor([-0.9741, -0.6931, -0.4741], dtype=oneflow.float32)\n\n See :class:`~oneflow.nn.LogSigmoid` for more details.\n\n """'], {}), '(oneflow.F.log_sigmoid,\n """\n log_sigmoid(x: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. math::\n \\\\text{log_sigmoid}(x) = \\\\log\\\\left(\\\\frac{ 1 }{ 1 + \\\\exp(-x)}\\\\right)\n \n For example:\n\n .. code-block:: python\n\n\n >>> import numpy as np\n >>> import oneflow as flow\n \n >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)\n >>> input = flow.Tensor(x) \n \n >>> out = flow.F.log_sigmoid(input)\n >>> out\n tensor([-0.9741, -0.6931, -0.4741], dtype=oneflow.float32)\n\n See :class:`~oneflow.nn.LogSigmoid` for more details.\n\n """\n )\n', (2677, 3320), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((3319, 3896), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.softsign', '"""\n softsign(x: Tensor) -> Tensor \n\n The formula is: \n \n .. math:: \n \n softsign(x) = \\\\frac{x}{1 + |x|}\n \n For example:\n \n .. code-block:: python\n \n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x) \n >>> out = flow.F.softsign(input)\n >>> out\n tensor([0.5 , 0.6667, 0.75 ], dtype=oneflow.float32)\n \n See :class:`~oneflow.nn.Softsign` for more details.\n \n """'], {}), '(oneflow.F.softsign,\n """\n softsign(x: Tensor) -> Tensor \n\n The formula is: \n \n .. math:: \n \n softsign(x) = \\\\frac{x}{1 + |x|}\n \n For example:\n \n .. 
code-block:: python\n \n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x) \n >>> out = flow.F.softsign(input)\n >>> out\n tensor([0.5 , 0.6667, 0.75 ], dtype=oneflow.float32)\n \n See :class:`~oneflow.nn.Softsign` for more details.\n \n """\n )\n', (3329, 3896), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((3901, 4456), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.silu', '"""\n silu(x: Tensor) -> Tensor\n\n The formula is: \n\n .. math::\n \n \\\\text{silu}(x) = x * sigmoid(x)\n\n For example:\n \n .. code-block:: python\n \n >>> import numpy as np\n >>> import oneflow as flow\n\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x) \n >>> out = flow.F.silu(input)\n >>> out\n tensor([0.7311, 1.7616, 2.8577], dtype=oneflow.float32)\n\n See :class:`~oneflow.nn.SiLU` for more details.\n \n """'], {}), '(oneflow.F.silu,\n """\n silu(x: Tensor) -> Tensor\n\n The formula is: \n\n .. math::\n \n \\\\text{silu}(x) = x * sigmoid(x)\n\n For example:\n \n .. code-block:: python\n \n >>> import numpy as np\n >>> import oneflow as flow\n\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x) \n >>> out = flow.F.silu(input)\n >>> out\n tensor([0.7311, 1.7616, 2.8577], dtype=oneflow.float32)\n\n See :class:`~oneflow.nn.SiLU` for more details.\n \n """\n )\n', (3911, 4456), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4461, 5061), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.mish', '""" \n mish(x: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. math::\n \\\\text{mish}(x) = x * \\\\text{tanh}(\\\\text{softplus}(x))\n\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n \n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x) \n\n >>> out = flow.F.mish(input)\n >>> out\n tensor([0.8651, 1.944 , 2.9865], dtype=oneflow.float32)\n\n See :\n class:`~oneflow.nn.Mish` for more details.\n \n """'], {}), '(oneflow.F.mish,\n """ \n mish(x: Tensor) -> Tensor \n\n Applies the element-wise function:\n\n .. math::\n \\\\text{mish}(x) = x * \\\\text{tanh}(\\\\text{softplus}(x))\n\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n \n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x) \n\n >>> out = flow.F.mish(input)\n >>> out\n tensor([0.8651, 1.944 , 2.9865], dtype=oneflow.float32)\n\n See :\n class:`~oneflow.nn.Mish` for more details.\n \n """\n )\n', (4471, 5061), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5064, 5279), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.relu', '"""\n relu(x: Tensor, inplace: bool =False) -> Tensor\n\n Applies the rectified linear unit function element-wise. See\n :class:`~oneflow.nn.ReLU` for more details.\n\n """'], {}), '(oneflow.F.relu,\n """\n relu(x: Tensor, inplace: bool =False) -> Tensor\n\n Applies the rectified linear unit function element-wise. See\n :class:`~oneflow.nn.ReLU` for more details.\n\n """\n )\n', (5074, 5279), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5283, 5791), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.hardsigmoid', '"""\n hardsigmoid(x: Tensor)-> Tensor\n\n Applies the element-wise function\n\n .. 
math::\n \\\\text{Hardsigmoid}(x) = \\\\begin{cases}\n 0 & \\\\text{if~} x \\\\le -3, \\\\\\\\\n 1 & \\\\text{if~} x \\\\ge +3, \\\\\\\\\n x / 6 + 1 / 2 & \\\\text{otherwise}\n \\\\end{cases}\n\n Args:\n inplace: If set to ``True``, will do this operation in-place. Default: ``False``\n\n See :class:`~oneflow.nn.Hardsigmoid` for more details.\n """'], {}), '(oneflow.F.hardsigmoid,\n """\n hardsigmoid(x: Tensor)-> Tensor\n\n Applies the element-wise function\n\n .. math::\n \\\\text{Hardsigmoid}(x) = \\\\begin{cases}\n 0 & \\\\text{if~} x \\\\le -3, \\\\\\\\\n 1 & \\\\text{if~} x \\\\ge +3, \\\\\\\\\n x / 6 + 1 / 2 & \\\\text{otherwise}\n \\\\end{cases}\n\n Args:\n inplace: If set to ``True``, will do this operation in-place. Default: ``False``\n\n See :class:`~oneflow.nn.Hardsigmoid` for more details.\n """\n )\n', (5293, 5791), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5783, 6342), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.hardswish', '"""\n hardswish(x: Tensor)-> Tensor\n\n Applies the hardswish function, element-wise, as described in the paper:\n\n `Searching for MobileNetV3`_.\n\n .. math::\n \\\\text{Hardswish}(x) = \\\\begin{cases}\n 0 & \\\\text{if~} x \\\\le -3, \\\\\\\\\n x & \\\\text{if~} x \\\\ge +3, \\\\\\\\\n x \\\\cdot (x + 3) /6 & \\\\text{otherwise}\n \\\\end{cases}\n\n See :class:`~oneflow.nn.Hardswish` for more details.\n\n .. _`Searching for MobileNetV3`:\n https://arxiv.org/abs/1905.02244\n """'], {}), '(oneflow.F.hardswish,\n """\n hardswish(x: Tensor)-> Tensor\n\n Applies the hardswish function, element-wise, as described in the paper:\n\n `Searching for MobileNetV3`_.\n\n .. math::\n \\\\text{Hardswish}(x) = \\\\begin{cases}\n 0 & \\\\text{if~} x \\\\le -3, \\\\\\\\\n x & \\\\text{if~} x \\\\ge +3, \\\\\\\\\n x \\\\cdot (x + 3) /6 & \\\\text{otherwise}\n \\\\end{cases}\n\n See :class:`~oneflow.nn.Hardswish` for more details.\n\n .. _`Searching for MobileNetV3`:\n https://arxiv.org/abs/1905.02244\n """\n )\n', (5793, 6342), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((6333, 6921), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.sigmoid', '"""\n sigmoid(input) -> Tensor\n\n Applies the element-wise function :math:`\\\\text{Sigmoid}(x) = \\\\frac{1}{1 + \\\\exp(-x)}`\n\n See :class:`~oneflow.nn.Sigmoid` for more details.\n\n For examples:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.Tensor(np.array([0.81733328, 0.43621480, 0.10351428]))\n >>> input = flow.Tensor(x)\n >>> out = flow.nn.functional.sigmoid(input)\n >>> out\n tensor([0.6937, 0.6074, 0.5259], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.F.sigmoid,\n """\n sigmoid(input) -> Tensor\n\n Applies the element-wise function :math:`\\\\text{Sigmoid}(x) = \\\\frac{1}{1 + \\\\exp(-x)}`\n\n See :class:`~oneflow.nn.Sigmoid` for more details.\n\n For examples:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.Tensor(np.array([0.81733328, 0.43621480, 0.10351428]))\n >>> input = flow.Tensor(x)\n >>> out = flow.nn.functional.sigmoid(input)\n >>> out\n tensor([0.6937, 0.6074, 0.5259], dtype=oneflow.float32)\n\n """\n )\n', (6343, 6921), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((6923, 7136), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.hardtanh', '"""\n hardtanh(input, min_val=-1., max_val=1.) -> Tensor\n\n Applies the HardTanh function element-wise. 
See :class:`~oneflow.nn.Hardtanh` for more\n details.\n\n """'], {}), '(oneflow.F.hardtanh,\n """\n hardtanh(input, min_val=-1., max_val=1.) -> Tensor\n\n Applies the HardTanh function element-wise. See :class:`~oneflow.nn.Hardtanh` for more\n details.\n\n """\n )\n', (6933, 7136), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7140, 7419), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.leaky_relu', '"""\n leaky_relu(x: Tensor, alpha :Float) -> Tensor\n\n Applies element-wise,\n :math:`\\\\text{LeakyReLU}(x) = \\\\max(0, x) + \\\\text{negative\\\\_slope} * \\\\min(0, x)`\n\n See :class:`~oneflow.nn.LeakyReLU` for more details.\n\n """'], {}), '(oneflow.F.leaky_relu,\n """\n leaky_relu(x: Tensor, alpha :Float) -> Tensor\n\n Applies element-wise,\n :math:`\\\\text{LeakyReLU}(x) = \\\\max(0, x) + \\\\text{negative\\\\_slope} * \\\\min(0, x)`\n\n See :class:`~oneflow.nn.LeakyReLU` for more details.\n\n """\n )\n', (7150, 7419), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7418, 8021), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.elu', '"""\n elu(x: Tensor, alpha :Float) -> Tensor\n\n Applies element-wise,\n :math:`\\\\text{ELU}(x) = \\\\max(0,x) + \\\\min(0, \\\\alpha * (\\\\exp(x) - 1))`.\n\n See :class:`~oneflow.nn.ELU` for more details.\n\n For examples:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)\n >>> input = flow.Tensor(x)\n >>> out = flow.nn.functional.elu(input, alpha=1.0)\n >>> out\n tensor([-0.3935, 0. , 0.5 ], dtype=oneflow.float32)\n """'], {}), '(oneflow.F.elu,\n """\n elu(x: Tensor, alpha :Float) -> Tensor\n\n Applies element-wise,\n :math:`\\\\text{ELU}(x) = \\\\max(0,x) + \\\\min(0, \\\\alpha * (\\\\exp(x) - 1))`.\n\n See :class:`~oneflow.nn.ELU` for more details.\n\n For examples:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)\n >>> input = flow.Tensor(x)\n >>> out = flow.nn.functional.elu(input, alpha=1.0)\n >>> out\n tensor([-0.3935, 0. , 0.5 ], dtype=oneflow.float32)\n """\n )\n', (7428, 8021), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8020, 8729), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.selu', '"""\n selu(x: Tensor) -> Tensor\n\n Applies element-wise,\n :math:`\\\\text{SELU}(x) = scale * (\\\\max(0,x) + \\\\min(0, \\\\alpha * (\\\\exp(x) - 1)))`,\n with :math:`\\\\alpha=1.6732632423543772848170429916717` and\n :math:`scale=1.0507009873554804934193349852946`.\n\n See :class:`~oneflow.nn.SELU` for more details.\n\n For examples:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x)\n >>> out = flow.nn.functional.selu(input)\n >>> out\n tensor([1.0507, 2.1014, 3.1521], dtype=oneflow.float32)\n """'], {}), '(oneflow.F.selu,\n """\n selu(x: Tensor) -> Tensor\n\n Applies element-wise,\n :math:`\\\\text{SELU}(x) = scale * (\\\\max(0,x) + \\\\min(0, \\\\alpha * (\\\\exp(x) - 1)))`,\n with :math:`\\\\alpha=1.6732632423543772848170429916717` and\n :math:`scale=1.0507009873554804934193349852946`.\n\n See :class:`~oneflow.nn.SELU` for more details.\n\n For examples:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> input = flow.Tensor(x)\n >>> out = flow.nn.functional.selu(input)\n >>> out\n tensor([1.0507, 2.1014, 3.1521], dtype=oneflow.float32)\n """\n )\n', (8030, 8729), False, 'from oneflow.framework.docstr.utils import add_docstr\n')]
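Among the formulas in the record above, Hardsigmoid is easy to cross-check by hand: clipping x/6 + 1/2 to [0, 1] reproduces the three-case definition exactly. A numpy sketch:

import numpy as np

def hardsigmoid(x):
    # x <= -3 clips to 0, x >= +3 clips to 1, linear in between
    return np.clip(x / 6.0 + 0.5, 0.0, 1.0)

print(hardsigmoid(np.array([-4.0, 0.0, 4.0])))  # [0.  0.5 1. ]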
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import collections from typing import Optional, Sequence, Union import oneflow as flow from oneflow.framework.tensor import register_tensor_op from oneflow.nn.module import Module from oneflow.nn.modules.utils import _check_axis, _check_inplace_valid from oneflow.ops.transpose_util import ( get_inversed_perm, get_perm_when_transpose_axis_to_last_dim, ) class ScalarMul(Module): def __init__(self, alpha) -> None: super().__init__() if not isinstance(alpha, (int, float)): raise ValueError("alpha type can only be int or float") self.alpha = alpha def forward(self, x): return flow.F.mul_scalar(x, self.alpha) class ScalarMulByTensor(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): return flow.F.mul_scalar_by_tensor(x, y) class ElementwiseMul(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): return flow.F.mul(x, y) class BroadcastMul(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): return flow.F.broadcast_mul(x, y) @register_tensor_op("mul") def _mul(input, other): """Computes the multiplication of input by other for each element, scalar and broadcast promotation are supported. The formula is: .. math:: out = input \\times other For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow # element-wise multiply >>> input = flow.Tensor(np.random.randn(2,3)) >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.mul(input,other).numpy() >>> out.shape (2, 3) # scalar mutiply >>> input = 5 >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.mul(input,other).numpy() >>> out.shape (2, 3) # broadcast mutiply >>> input = flow.Tensor(np.random.randn(1,1)) >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.mul(input,other).numpy() >>> out.shape (2, 3) """ if isinstance(input, (int, float)): return ScalarMul(input)(other) elif isinstance(other, (int, float)): return ScalarMul(other)(input) elif input.shape == other.shape: return ElementwiseMul()(input, other) elif input.shape == (1,): return ScalarMulByTensor()(other, input) elif other.shape == (1,): return ScalarMulByTensor()(input, other) else: return BroadcastMul()(input, other) class Variance(Module): def __init__(self, dim: int = None, keepdim: bool = False) -> None: super().__init__() self.dim = dim self.keepdim = keepdim def forward(self, input): axis = _check_axis(self.dim, input.shape) if isinstance(axis, list) and len(axis) == 0: return flow.zeros(size=input.shape) else: return flow.sub( flow.mean(flow.square(input), axis, self.keepdim), flow.square(flow.mean(input, axis, self.keepdim)), ) @register_tensor_op("var") def variance_op(input, dim=None, keepdim=False): """Returns the variance of each row of the `input` tensor in the given dimension `dim`. If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. 
Otherwise, dim is squeezed (see `flow.squeeze()`), resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). Args: input (Tensor): the input tensor. dim (int or tuple of python:ints): the dimension or dimensions to reduce. Defaults to None. keepdim (bool, optional): whether the output tensor has dim retained or not. Defaults to False. Returns: Tensor: The result of variance on the specified axis of input Tensor For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> np_arr = np.random.randn(2,3,4,5) >>> input = flow.Tensor(np_arr) >>> output = flow.var(input, 1, True) """ return Variance(dim, keepdim)(input) class ScalarSubByTensor(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): return flow.F.sub_scalar_by_tensor(x, y) class BroadcastSub(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): return flow.F.broadcast_sub(x, y) class ScalarAdd(Module): def __init__(self, alpha, inplace: bool = False) -> None: super().__init__() if not isinstance(alpha, int) and (not isinstance(alpha, float)): raise ValueError("scalar type can only be int or float") self.alpha = alpha self.inplace = inplace def forward(self, x): if self.inplace: _check_inplace_valid(x) return flow.F.add_scalar(x, self.alpha, self.inplace) @register_tensor_op("sub") def _sub(input, other): """Computes the subtraction of input by other for each element, scalar and broadcast promotation are supported. The formula is: .. math:: out = input - other For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow # element-wise subtract >>> input = flow.Tensor(np.random.randn(2,3)) >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.sub(input,other).numpy() >>> out.shape (2, 3) # scalar subtract >>> input = 5 >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.sub(input,other).numpy() >>> out.shape (2, 3) # broadcast subtract >>> input = flow.Tensor(np.random.randn(1,1)) >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.sub(input,other).numpy() >>> out.shape (2, 3) """ if isinstance(input, (int, float)): return ScalarAdd(input)(ScalarMul(-1)(other)) elif isinstance(other, (int, float)): return ScalarAdd(-1 * other)(input) elif input.shape == other.shape: return BroadcastSub()(input, other) elif other.shape == (1,): return ScalarSubByTensor()(input, other) else: return BroadcastSub()(input, other) class BroadcastDiv(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): return flow.F.broadcast_div(x, y) class ScalarDivByTensor(Module): def __init__(self) -> None: super().__init__() def forward(self, x, scalar): return flow.F.div_scalar_by_tensor(x, scalar) @register_tensor_op("div") def _div(input, other): """Computes the division of input by other for each element, scalar and broadcast promotation are supported. The formula is: .. math:: out = \\frac{input}{other} Args: input (Union[int, float, flow.Tensor]): input. other (Union[int, float, flow.Tensor]): other. For example: .. 
code-block:: python >>> import numpy as np >>> import oneflow as flow # element-wise divide >>> input = flow.Tensor(np.random.randn(2,3)) >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.div(input,other).numpy() >>> out.shape (2, 3) # scalar divide >>> input = 5 >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.div(input,other).numpy() >>> out.shape (2, 3) # broadcast divide >>> input = flow.Tensor(np.random.randn(1,1)) >>> other = flow.Tensor(np.random.randn(2,3)) >>> out = flow.div(input,other).numpy() >>> out.shape (2, 3) """ if isinstance(input, (int, float)): return ScalarMul(input)(flow.reciprocal(other)) elif isinstance(other, (int, float)): if other == 0 or other == 0.0: other = 0.0 else: other = 1.0 / float(other) return ScalarMul(other)(input) elif input.shape == other.shape: return BroadcastDiv()(input, other) elif other.shape == (1,): return ScalarDivByTensor()(input, other) else: return BroadcastDiv()(input, other) class Reciprocal(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.reciprocal_no_nan(x) @register_tensor_op("reciprocal") def _reciprocal(x): """Computes the safe reciprocal of x. If x is zero, the reciprocal will be also set to zero. For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> x = flow.Tensor(np.array([[1, 2, 3], [4, 5, 6]])) >>> out = flow.reciprocal(x) >>> out.numpy() array([[1. , 0.5 , 0.33333334], [0.25 , 0.2 , 0.16666667]], dtype=float32) """ return Reciprocal()(x) class ScalarAddByTensor(Module): def __init__(self, inplace: bool = False) -> None: super().__init__() self.inplace = inplace def forward(self, x, y): if self.inplace: _check_inplace_valid(x) return flow.F.add_scalar_by_tensor(x, y, self.inplace) class ElementwiseAdd(Module): def __init__(self, inplace: bool = False) -> None: super().__init__() self.inplace = inplace def forward(self, x, y): if self.inplace: _check_inplace_valid(x) return flow.F.add(x, y, self.inplace) class BroadcastAdd(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): return flow.F.broadcast_add(x, y) @register_tensor_op("add") def _add(x, y): """Computes the addition of x by y for each element, scalar and broadcast promotation are supported. The formula is: .. math:: out = x + y For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow # element-wise add >>> x = flow.Tensor(np.random.randn(2,3)) >>> y = flow.Tensor(np.random.randn(2,3)) >>> out = flow.add(x, y).numpy() >>> out.shape (2, 3) # scalar add >>> x = 5 >>> y = flow.Tensor(np.random.randn(2,3)) >>> out = flow.add(x, y).numpy() >>> out.shape (2, 3) # broadcast add >>> x = flow.Tensor(np.random.randn(1,1)) >>> y = flow.Tensor(np.random.randn(2,3)) >>> out = flow.add(x, y).numpy() >>> out.shape (2, 3) """ if isinstance(x, (int, float)): return ScalarAdd(x)(y) elif isinstance(y, (int, float)): return ScalarAdd(y)(x) elif x.shape == y.shape: return ElementwiseAdd()(x, y) elif x.shape == (1,): return ScalarAddByTensor()(y, x) elif y.shape == (1,): return ScalarAddByTensor()(x, y) else: return BroadcastAdd()(x, y) @register_tensor_op("add_") def _add_inplace(x, y): """ In-place version of :func:`oneflow.Tensor.add`. 
""" if isinstance(y, (int, float)): return ScalarAdd(y, inplace=True)(x) elif x.shape == y.shape: return ElementwiseAdd(inplace=True)(x, y) elif x.shape == (1,): raise RuntimeError( f"output with shape {x.shape} doesn't match the broadcast shape {y.shape}" ) elif y.shape == (1,): return ScalarAddByTensor(inplace=True)(x, y) else: y = flow.broadcast_like(y, x) return ElementwiseAdd(inplace=True)(x, y) class Asin(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.asin(x) def asin_op(input): """ Returns a new tensor with the arcsine of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\sin^{-1}(\\text{input}_{i}) Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> input = flow.Tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32) >>> output = flow.asin(input) >>> output.shape flow.Size([4]) >>> output tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32) >>> input1 = flow.Tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32) >>> output1 = input1.asin() >>> output1.shape flow.Size([2, 2]) >>> output1 tensor([[ 0.9273, 1.5708], [-0.6435, -1.5708]], dtype=oneflow.float32) """ return Asin()(input) @register_tensor_op("asin") def asin_op_tensor(input): """ See :func:`oneflow.asin` """ return Asin()(input) def arcsin_op(input): """ Alias for :func:`oneflow.asin` """ return Asin()(input) @register_tensor_op("arcsin") def arcsin_op_tensor(input): """ See :func:`oneflow.asin` """ return Asin()(input) class Asinh(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.asinh(x) def asinh_op(input): """ Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\sinh^{-1}(\\text{input}_{i}) Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> input = flow.Tensor(np.array([2, 3, 4]), dtype=flow.float32) >>> output = flow.asinh(input) >>> output.shape flow.Size([3]) >>> output tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32) >>> input1 = flow.Tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32) >>> output1 = input1.asinh() >>> output1.shape flow.Size([2, 3]) >>> output1 tensor([[-0.8814, 0. , -0.39 ], [ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32) """ return Asinh()(input) def arcsinh_op(input): """ Alias for :func:`oneflow.asinh` """ return Asinh()(input) @register_tensor_op("asinh") def asinh_op_tensor(input): """ See :func:`oneflow.asinh` """ return Asinh()(input) @register_tensor_op("arcsinh") def arcsinh_op_tensor(input): """ See :func:`oneflow.asinh` """ return Asinh()(input) class Sin(Module): def __init__(self, inplace: bool = False) -> None: super().__init__() self.inplace = inplace def forward(self, x): if self.inplace: _check_inplace_valid(x) return flow.F.sin(x, self.inplace) def sin_op(tensor): """ Returns a new tensor with the sine of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\sin(\\text{input}_{i}) Args: input (Tensor): the input tensor. For example: .. 
code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x1 = flow.Tensor(np.array([-0.5461, 0.1347, -2.7266, -0.2746]).astype(np.float32)) >>> out1 = flow.sin(x1) >>> out1 tensor([-0.5194, 0.1343, -0.4032, -0.2712], dtype=oneflow.float32) >>> x2 = flow.Tensor(np.array([-1.4, 2.6, 3.7]).astype(np.float32),device=flow.device('cuda')) >>> out2 = flow.sin(x2) >>> out2 tensor([-0.9854, 0.5155, -0.5298], device='cuda:0', dtype=oneflow.float32) """ return Sin(inplace=False)(tensor) @register_tensor_op("sin") def sin_op_tensor(tensor): """ sin() -> Tensor See :func:`oneflow.sin` """ return Sin(inplace=False)(tensor) @register_tensor_op("sin_") def inplace_sin_op_tensor(x): """ In-place version of :func:`oneflow.sin` """ return Sin(inplace=True)(x) class Cos(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.cos(x) @register_tensor_op("cos") def cos_op(tensor): """ Returns a new tensor with the cosine of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\cos(\\text{input}_{i}) Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> arr = np.array([1.4309, 1.2706, -0.8562, 0.9796]) >>> input = flow.Tensor(arr, dtype=flow.float32) >>> output = flow.cos(input).numpy() """ return Cos()(tensor) class Atan(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.atan(x) def atan_op(tensor): """ Returns a new tensor with the arctangent of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\tan^{-1}(\\text{input}_{i}) Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> input = flow.Tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32) >>> output = flow.atan(input) >>> output.shape flow.Size([3]) """ return Atan()(tensor) @register_tensor_op("atan") def atan_op_tensor(tensor): """ See :func:`oneflow.atan` """ return Atan()(tensor) def arctan_op(tensor): """ Alias for :func:`oneflow.atan` """ return Atan()(tensor) @register_tensor_op("arctan") def arctan_op_tensor(tensor): """ See :func:`oneflow.arctan` """ return Atan()(tensor) class FMod(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): if not isinstance(x, (flow.Tensor, flow._oneflow_internal.Tensor)): raise ValueError("Expected type of input is Tensor") if isinstance(y, (int, float)): x = flow.F.cast(x, flow.float32) y = flow.tensor([y], dtype=flow.float32, device=x.device) elif isinstance(y, (flow.Tensor, flow._oneflow_internal.Tensor)): if x.dtype != y.dtype: x = flow.F.cast(x, flow.float32) y = flow.F.cast(y, flow.float32) else: raise ValueError("Expected type of other is Tensor or Scalar") return flow.F.fmod(x, y) def fmod_op(input, other): """ fmod(input, other, *, out=None) -> Tensor Computes the element-wise remainder of division. The dividend and divisor may contain both for integer and floating point numbers. The remainder has the same sign as the dividend :attr:`input`. Supports broadcasting to a common shape, integer and float inputs. Args: input (Tensor): the dividend other (Tensor or Scalar): the divisor Keyword args: out (Tensor, optional): the output tensor. Example:: >>> import oneflow as flow >>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3]), 2) tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32) >>> flow.fmod(flow.tensor([1, 2, 3, 4, 5]), 1.5) tensor([1. , 0.5, 0. , 1. 
, 0.5], dtype=oneflow.float32) >>> flow.fmod(flow.tensor([1, 2, 3, 4, -5]), flow.tensor([4, 2, 1, 3., 1])) tensor([ 1., 0., 0., 1., -0.], dtype=oneflow.float32) """ return FMod()(input, other) @register_tensor_op("fmod") def fmod_op_tensor(input, other): """ See :func:`oneflow.fmod` """ return FMod()(input, other) class Log(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.log(x) @register_tensor_op("log") def log_op(tensor): """ Returns a new tensor with the natural logarithm of the elements of :attr:`input`. .. math:: y_{i} = \\log_{e} (x_{i}) Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> arr = np.random.randn(2, 3, 4, 5) >>> input = flow.Tensor(arr, dtype=flow.float32) >>> output = flow.log(input) """ return Log()(tensor) class Subtract(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): if isinstance(x, (int, float)): return ScalarAdd(x)(-1 * y) elif isinstance(y, (int, float)): return ScalarAdd(-1 * y)(x) elif x.shape == y.shape: return BroadcastSub()(x, y) elif x.shape == (1,): return ScalarSubByTensor()(y, x) elif y.shape == (1,): return ScalarSubByTensor()(x, y) else: return BroadcastSub()(x, y) class Sqrt(Module): def __init__(self) -> None: super().__init__() def forward(self, input): return flow.F.sqrt(input) @register_tensor_op("rsqrt") def rsqrt_op(input): """Returns a new tensor with the reciprocal of the square-root of each of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\frac{1}{\\sqrt{\\text{input}_{i}}} Args: input (Tensor) – the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> a = flow.Tensor(np.array([1.0, 2.0, 3.0])) >>> out = flow.rsqrt(a).numpy() >>> out array([1. , 0.70710677, 0.57735026], dtype=float32) """ return Rsqrt()(input) class Rsqrt(Module): def __init__(self) -> None: super().__init__() def forward(self, input): return flow.F.rsqrt(input) @register_tensor_op("sqrt") def sqrt_op(input): """Returns a new tensor with the square-root of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\sqrt{\\text{input}_{i}} Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> arr = np.array([1.0, 2.0, 3.0]) >>> input = flow.Tensor(arr) >>> output = flow.sqrt(input).numpy() >>> output array([1. , 1.4142135, 1.7320508], dtype=float32) """ return Sqrt()(input) class Square(Module): def __init__(self) -> None: super().__init__() def forward(self, input): return flow.F.square(input) @register_tensor_op("square") def square_op(input): """Returns a new tensor with the square of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\sqrt{\\text{input}_{i}} Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> arr = np.array([1.0, 2.0, 3.0]) >>> input = flow.Tensor(arr) >>> output = flow.square(input).numpy() >>> output array([1., 4., 9.], dtype=float32) """ return Square()(input) class Std(Module): def __init__(self, dim=None, unbiased=True, keepdim=False) -> None: super().__init__() assert unbiased == True, "Only support 'unbiased=True' for now!" 
self.unbiased = unbiased self.keepdim = keepdim self.dim = dim self.reduce_count = 1 self.square_op = Square() self.sqrt_op = Sqrt() self.subtract_op = Subtract() def forward(self, x): self.axis = _check_axis(self.dim, x.shape) if isinstance(self.axis, list) and len(self.axis) == 0: return flow.zeros(size=x.shape) else: if len(self.axis) == 0: self.reduce_count = x.nelement() else: for i in self.axis: self.reduce_count *= x.shape[i] sum = ( flow.sum(self.square_op(x), self.axis, self.keepdim) / self.reduce_count ) square = self.square_op( flow.sum(x, self.axis, self.keepdim) / self.reduce_count ) subtract = self.subtract_op(sum, square) res = self.sqrt_op(subtract) return res @register_tensor_op("std") def std_op(tensor, dim, unbiased=True, keepdim=False): """ Returns the standard-deviation of each row of the :attr:`input` tensor in the dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, reduce over all of them. If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s). If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated via the biased estimator. Otherwise, Bessel's correction will be used. Args: input (Tensor): the input tensor. dim (int or tuple of python:ints): the dimension or dimensions to reduce. unbiased (bool): whether to use the unbiased estimation or not keepdim (bool): whether the output tensor has `dim` retained or not. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> arr = np.array([1.0, 2.0, 3.0]) >>> input = flow.Tensor(arr) >>> output = flow.std(input, dim=0).numpy() >>> output array([0.8164968], dtype=float32) """ return Std(dim, unbiased, keepdim)(tensor) class Pow(Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): if isinstance(y, (int, float)): return flow.F.pow_scalar(x, alpha=y) else: return flow.F.pow(x, y) @register_tensor_op("pow") def pow_op(tensor, exponent): """Takes the power of each element in input with exponent and returns a tensor with the result. Exponent can be either a single float number, a single int number, or a tensor with the same shape as input. When exponent is a scalar value, the operation applied is: .. math:: \\text{out}_i = x_i ^ \\text{exponent} \u200b When exponent is a tensor, the operation applied is: .. math:: \\text{out}_i = x_i ^ {\\text{exponent}_i} Args: - input (Tensor): the input tensor. - exponent (int, float, Tensor): the exponent. Returns: Tensor: The result of variance on the specified axis of input Tensor For example: .. 
code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x = flow.Tensor(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])) >>> out = flow.pow(x, 2).numpy() >>> out array([ 1., 4., 9., 16., 25., 36.], dtype=float32) >>> x = flow.Tensor(np.array([1.0, 2.0, 3.0, 4.0])) >>> y = flow.Tensor(np.array([1.0, 2.0, 3.0, 4.0])) >>> out = flow.pow(x, y).numpy() >>> out array([ 1., 4., 27., 256.], dtype=float32) """ return Pow()(tensor, exponent) class Addmm(Module): def __init__(self) -> None: super().__init__() def forward(self, x, mat1, mat2, alpha=1, beta=1): if len(x.shape) > 2 or len(mat1.shape) > 2 or len(mat2.shape) > 2: raise ValueError("input matrixes shape can not be greater than 2") else: return _mul(x, beta) + _mul(flow.F.matmul(mat1, mat2), alpha) def addmm_op(input, mat1, mat2, alpha=1, beta=1): """addmm(beta=1, input, alpha=1, mat1, mat2, out=None) -> Tensor Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. The matrix :attr:`input` is added to the final result. If :attr:`mat1` is a :math:`(n \\times m)` tensor, :attr:`mat2` is a :math:`(m \\times p)` tensor, then :attr:`input` must be broadcastable with a :math:`(n \\times p)` tensor and :attr:`out` will be a :math:`(n \\times p)` tensor. :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. .. math:: \\text{out} = \\beta\\ \\text{input} + \\alpha\\ (\\text{mat1}_i \\mathbin{@} \\text{mat2}_i) For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` must be real numbers, otherwise they should be integers. Args: beta (Number, optional): multiplier for :attr:`input` (:math:`\\beta`) input (Tensor): matrix to be added alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\\alpha`) mat1 (Tensor): the first matrix to be multiplied mat2 (Tensor): the second matrix to be multiplied out (Tensor, optional): the output tensor. 
For example: >>> import numpy as np >>> import oneflow as flow >>> input = flow.tensor(np.array([[1,2,4],[5,11,9.1]])) >>> mat1 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5]])) >>> mat2 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5],[3.7,2.2,8.1]])) >>> output = flow.addmm(input, mat1, mat2) >>> output tensor([[100.68, 33.83, 126.87], [110.01, 43.48, 133.61]], dtype=oneflow.float64) >>> output.shape flow.Size([2, 3]) >>> input2 = flow.tensor(np.array([1.7])) >>> mat1 = flow.tensor(np.array([[1,2],[5,9.1],[7.7,1.4]])) >>> mat2 = flow.tensor(np.array([[1,2,3.7],[5,9.1,6.8]])) >>> output2 = flow.addmm(input2, mat1, mat2, alpha=1, beta=2) >>> output2 tensor([[14.4 , 23.6 , 20.7 ], [53.9 , 96.21, 83.78], [18.1 , 31.54, 41.41]], dtype=oneflow.float64) >>> output2.shape flow.Size([3, 3]) """ return Addmm()(input, mat1, mat2, alpha, beta) @register_tensor_op("addmm") def addmm_op_tensor(input, mat1, mat2, alpha=1, beta=1): """ See :func:`oneflow.addmm` """ return Addmm()(input, mat1, mat2, alpha, beta) class Clamp(Module): def __init__(self, min_value=None, max_value=None) -> None: super().__init__() if min_value is not None: floating_min_value = float(min_value) integral_min_value = int(min_value) if max_value is not None: floating_max_value = float(max_value) integral_max_value = int(max_value) if min_value is not None and max_value is not None: self._op = ( flow.builtin_op("clip_by_scalar") .Input("x") .Output("y") .Attr("floating_min", floating_min_value) .Attr("integral_min", integral_min_value) .Attr("floating_max", floating_max_value) .Attr("integral_max", integral_max_value) .Build() ) elif min_value is not None: self._op = ( flow.builtin_op("clip_by_scalar_min") .Input("x") .Output("y") .Attr("floating_min", floating_min_value) .Attr("integral_min", integral_min_value) .Build() ) elif max_value is not None: self._op = ( flow.builtin_op("clip_by_scalar_max") .Input("x") .Output("y") .Attr("floating_max", floating_max_value) .Attr("integral_max", integral_max_value) .Build() ) else: raise ValueError("min_value and max_value cannot be None at the same time") def forward(self, x): return self._op(x)[0] def clamp_op(tensor, min=None, max=None): """ Clamp all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]` and return a resulting tensor: .. math:: y_i = \\begin{cases} \\text{min} & \\text{if } x_i < \\text{min} \\\\ x_i & \\text{if } \\text{min} \\leq x_i \\leq \\text{max} \\\\ \\text{max} & \\text{if } x_i > \\text{max} \\end{cases} If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, args :attr:`min` and :attr:`max` must be real numbers, otherwise they should be integers. Args: input (Tensor): the input tensor. min (Number): lower-bound of the range to be clamped to. Defaults to None. max (Number): upper-bound of the range to be clamped to. Defaults to None. out (Tensor, optional): the output tensor. For example: .. 
code-block:: python >>> import oneflow as flow >>> import numpy as np >>> arr = np.array([0.2, 0.6, -1.5, -0.3]) >>> input = flow.Tensor(arr) >>> output = flow.clamp(input, min=-0.5, max=0.5) >>> output tensor([ 0.2, 0.5, -0.5, -0.3], dtype=oneflow.float32) >>> arr = np.array([0.2, 0.6, -1.5, -0.3]) >>> input = flow.Tensor(arr) >>> output = flow.clamp(input, min=None, max=0.5) >>> output tensor([ 0.2, 0.5, -1.5, -0.3], dtype=oneflow.float32) >>> arr = np.array([0.2, 0.6, -1.5, -0.3]) >>> input = flow.Tensor(arr) >>> output = flow.clamp(input, min=-0.5, max=None) >>> output tensor([ 0.2, 0.6, -0.5, -0.3], dtype=oneflow.float32) """ return Clamp(min, max)(tensor) @register_tensor_op("clamp") def clamp_op_tensor(tensor, min=None, max=None): """ See :func:`oneflow.clamp` """ return Clamp(min, max)(tensor) def clip_op(tensor, min=None, max=None): """ Alias for :func:`oneflow.clamp` """ return Clamp(min, max)(tensor) @register_tensor_op("clip") def clip_op_tensor(tensor, min=None, max=None): """ See :func:`oneflow.clamp` """ return Clamp(min, max)(tensor) class Cosh(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.cosh(x) @register_tensor_op("cosh") def cosh_op(tensor): """ Returns a new tensor with the hyperbolic cosine of the elements of :attr:`input`. .. math:: \\text{out}_{i} = \\cosh(\\text{input}_{i}) Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import numpy as np >>> import oneflow as flow >>> arr = np.array([ 0.1632, 1.1835, -0.6979, -0.7325]) >>> input = flow.Tensor(arr, dtype=flow.float32) >>> output = flow.cosh(input).numpy() >>> output array([1.0133467, 1.7859949, 1.2535787, 1.2804903], dtype=float32) """ return Cosh()(tensor) class Erf(Module): def __init__(self) -> None: super().__init__() def forward(self, input): return flow.F.erf(input) @register_tensor_op("erf") def erf_op(input): """Computes the error function of each element. The error function is defined as follows: .. math:: \\operatorname{erf}(x)=\\frac{2}{\\sqrt{\\pi}} \\int_{0}^{x} e^{-t^{2}} d t Args: x (oneflow.Tensor): A Tensor Returns: oneflow.Tensor: The result Tensor For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x = flow.Tensor(np.array([0, -1., 10.]), dtype=flow.float32) >>> out = flow.erf(x) >>> out.shape flow.Size([3]) >>> out.numpy() array([ 0. , -0.8427008, 1. ], dtype=float32) >>> x = flow.Tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32) >>> out = flow.erf(x) >>> out.shape flow.Size([2, 3]) >>> out.numpy() array([[ 0. , -0.8427008 , 1. ], [ 1. , 1. , 0.74210095]], dtype=float32) >>> x = flow.Tensor(np.array([[0, -1., 10.], [5, 7, 0.8], [2, 3, 4]]), dtype=flow.float32) >>> out = x.erf() >>> out.shape flow.Size([3, 3]) >>> out.numpy() array([[ 0. , -0.8427008 , 1. ], [ 1. , 1. , 0.74210095], [ 0.9953223 , 0.9999779 , 1. ]], dtype=float32) """ return Erf()(input) @register_tensor_op("erf") def erf_op_tensor(input): """ See :func:`oneflow.erf` """ return Erf()(input) class Erfc(Module): def __init__(self) -> None: super().__init__() self.erfc_op = flow.builtin_op("erfc").Input("x").Output("y").Build() def forward(self, input): return self.erfc_op(input)[0] @register_tensor_op("erfc") def erfc_op(input): """Computes the complementary error function of each element of input. The complementary error function is defined as follows: .. 
math:: \\operatorname{erfc}(x)=1-\\frac{2}{\\sqrt{\\pi}} \\int_{0}^{x} e^{-t^{2}} d t Args: x (oneflow.Tensor): A Tensor Returns: oneflow.Tensor: The result Tensor For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x = flow.Tensor(np.array([0, -1., 10.]), dtype=flow.float32) >>> out = flow.erfc(x) >>> out.shape flow.Size([3]) >>> out.numpy() array([1.0000000e+00, 1.8427007e+00, 2.8025969e-45], dtype=float32) >>> x = flow.Tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32) >>> out = flow.erfc(x) >>> out.shape flow.Size([2, 3]) >>> out.numpy() array([[1.0000000e+00, 1.8427007e+00, 2.8025969e-45], [1.5374597e-12, 4.1838257e-23, 2.5789905e-01]], dtype=float32) >>> x = flow.Tensor(np.array([[0, -1., 10.], [5, 7, 0.8], [2, 3, 4]]), dtype=flow.float32) >>> out = x.erfc() >>> out.shape flow.Size([3, 3]) >>> out.numpy() array([[1.0000000e+00, 1.8427007e+00, 2.8025969e-45], [1.5374597e-12, 4.1838257e-23, 2.5789905e-01], [4.6777348e-03, 2.2090499e-05, 1.5417259e-08]], dtype=float32) """ return Erfc()(input) @register_tensor_op("erfc") def erfc_op_tensor(input): """ See :func:`oneflow.erfc` """ return Erfc()(input) class Ceil(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.ceil(x) def ceil_op(x): """Returns a new tensor with the ceil of the elements of :attr:`x`, the smallest integer greater than or equal to each element. The equation is: .. math:: \\text{out}_{i} = \\left\\lceil \\text{input}_{i} \\right\\rceil = \\left\\lfloor \\text{input}_{i} \\right\\rfloor + 1 Args: x (oneflow.Tensor): A Tensor. Returns: oneflow.Tensor: The result Tensor For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x = flow.Tensor(np.array([0.1, -2, 3.4]).astype(np.float32)) >>> y = flow.ceil(x) >>> print(y.shape) flow.Size([3]) >>> print(y.numpy()) [ 1. -2. 4.] >>> x = flow.Tensor(np.array([[2.5, 4.6, 0.6],[7.8, 8.3, 9.2]]).astype(np.float32)) >>> y = x.ceil() >>> print(y.shape) flow.Size([2, 3]) >>> print(y.numpy()) [[ 3. 5. 1.] [ 8. 9. 10.]] >>> x = flow.Tensor(np.array([[[2.2, 4.4, 6.5],[7.1, 8.2, 9.3]],[[10.6,11.2,12.2],[13.5,14.8,15.9]]]).astype(np.float32)) >>> y = flow.ceil(x) >>> print(y.shape) flow.Size([2, 2, 3]) >>> print(y.numpy()) [[[ 3. 5. 7.] [ 8. 9. 10.]] <BLANKLINE> [[11. 12. 13.] [14. 15. 16.]]] """ return Ceil()(x) @register_tensor_op("ceil") def ceil_op_tensor(x): """ See :func:`oneflow.ceil` """ return Ceil()(x) class Expm1(Module): def __init__(self) -> None: super().__init__() def forward(self, x): return flow.F.expm1(x) def expm1_op(x): """Returns a new tensor with the exponential of the elements minus 1 of :attr:`x`. The equation is: .. math:: y_{i} = e^{x_{i}} - 1 Args: x (oneflow.Tensor): A Tensor. Returns: oneflow.Tensor: The result Tensor For example: .. 
code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x = flow.Tensor(np.array([1, 2, 3]).astype(np.float32)) >>> y = flow.expm1(x) >>> y.shape flow.Size([3]) >>> y tensor([ 1.7183, 6.3891, 19.0855], dtype=oneflow.float32) >>> x = flow.Tensor(np.array([[2, 4, 6],[7, 8, 9]]).astype(np.float32)) >>> y = x.expm1() >>> y.shape flow.Size([2, 3]) >>> y tensor([[6.3891e+00, 5.3598e+01, 4.0243e+02], [1.0956e+03, 2.9800e+03, 8.1021e+03]], dtype=oneflow.float32) >>> x = flow.Tensor(np.array([[[2, 4, 6],[7, 8, 9]],[[10,11,12],[13,14,15]]]).astype(np.float32)) >>> y = flow.expm1(x) >>> print(y.shape) flow.Size([2, 2, 3]) >>> print(y.numpy()) [[[6.3890562e+00 5.3598152e+01 4.0242880e+02] [1.0956332e+03 2.9799580e+03 8.1020840e+03]] <BLANKLINE> [[2.2025465e+04 5.9873141e+04 1.6275380e+05] [4.4241238e+05 1.2026032e+06 3.2690165e+06]]] """ return Expm1()(x) @register_tensor_op("expm1") def expm1_op_tensor(x): """ See :func:`oneflow.expm1` """ return Expm1()(x) class Topk(Module): def __init__( self, k, dim: int = None, largest: bool = True, sorted: bool = True ) -> None: super().__init__() self._op_topk_last_dim = ( flow.builtin_op("top_k") .Input("in") .Output("out") .Attr("k", k) .Attr("sorted", sorted) .Build() ) self.dim = dim self.largest = largest def forward(self, input): if self.dim == None: self.dim = -1 num_axes = len(input.shape) axis = self.dim if self.dim >= 0 else self.dim + num_axes assert 0 <= axis < num_axes, "axis out of range" if axis == num_axes - 1: if self.largest: indices = self._op_topk_last_dim(input)[0] else: neg_input = flow.mul(input, -1) indices = self._op_topk_last_dim(neg_input)[0] return (flow.gather(input, indices, dim=axis), indices) else: perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis) x = flow.F.transpose(input, perm=perm) if self.largest: indices = self._op_topk_last_dim(x)[0] else: neg_input = flow.mul(x, -1) indices = self._op_topk_last_dim(neg_input)[0] indices = flow.F.transpose(indices, perm=get_inversed_perm(perm)) return (flow.gather(input, indices, dim=axis), indices) @register_tensor_op("topk") def topk_op(input, k, dim: int = None, largest: bool = True, sorted: bool = True): """Finds the values and indices of the k largest entries at specified axis. Args: input (oneflow.Tensor): Input Tensor dim (int, optional): the dimension to sort along. Defaults to the last dim (-1) largest (bool, optional): controls whether to return largest or smallest elements sorted (bool, optional): controls whether to return the elements in sorted order Returns: Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int32)): A tuple of (values, indices), where the indices are the indices of the elements in the original input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> x = np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32) >>> (values, indices) = flow.topk(flow.Tensor(x), k=3, dim=1) >>> values tensor([[8., 7., 3.], [9., 4., 3.]], dtype=oneflow.float32) >>> indices tensor([[2, 3, 1], [1, 2, 3]], dtype=oneflow.int32) >>> values.shape flow.Size([2, 3]) >>> indices.shape flow.Size([2, 3]) >>> (values, indices) = flow.topk(flow.Tensor(x), k=2, dim=1, largest=False) >>> values tensor([[1., 2.], [1., 2.]], dtype=oneflow.float32) >>> indices tensor([[0, 4], [0, 4]], dtype=oneflow.int32) >>> values.shape flow.Size([2, 2]) >>> indices.shape flow.Size([2, 2]) """ return Topk(k=k, dim=dim, largest=largest, sorted=sorted)(input) if __name__ == "__main__": import doctest doctest.testmod(raise_on_error=True)
[ "oneflow.framework.tensor.register_tensor_op", "oneflow.F.broadcast_mul", "oneflow.F.transpose", "oneflow.F.cosh", "oneflow.F.log", "oneflow.F.add", "oneflow.F.asinh", "oneflow.F.rsqrt", "oneflow.sum", "oneflow.F.cast", "oneflow.F.broadcast_add", "oneflow.F.mul", "oneflow.F.sqrt", "oneflow.tensor", "oneflow.F.broadcast_sub", "oneflow.mean", "oneflow.nn.modules.utils._check_inplace_valid", "oneflow.F.pow", "oneflow.F.reciprocal_no_nan", "oneflow.F.expm1", "oneflow.F.add_scalar_by_tensor", "oneflow.F.sub_scalar_by_tensor", "oneflow.F.cos", "oneflow.F.fmod", "oneflow.reciprocal", "oneflow.F.mul_scalar", "oneflow.nn.modules.utils._check_axis", "oneflow.F.square", "oneflow.mul", "oneflow.F.asin", "oneflow.F.add_scalar", "oneflow.F.broadcast_div", "oneflow.builtin_op", "oneflow.F.pow_scalar", "oneflow.F.atan", "oneflow.square", "oneflow.F.ceil", "oneflow.F.matmul", "oneflow.F.erf", "oneflow.F.sin", "oneflow.zeros", "oneflow.gather", "oneflow.F.mul_scalar_by_tensor", "oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim", "oneflow.ops.transpose_util.get_inversed_perm", "oneflow.broadcast_like", "oneflow.F.div_scalar_by_tensor" ]
[((1755, 1780), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""mul"""'], {}), "('mul')\n", (1773, 1780), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((3779, 3804), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""var"""'], {}), "('var')\n", (3797, 3804), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((5673, 5698), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sub"""'], {}), "('sub')\n", (5691, 5698), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((7414, 7439), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""div"""'], {}), "('div')\n", (7432, 7439), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((9205, 9237), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""reciprocal"""'], {}), "('reciprocal')\n", (9223, 9237), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((10509, 10534), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""add"""'], {}), "('add')\n", (10527, 10534), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((11804, 11830), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""add_"""'], {}), "('add_')\n", (11822, 11830), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((13492, 13518), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""asin"""'], {}), "('asin')\n", (13510, 13518), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((13723, 13751), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""arcsin"""'], {}), "('arcsin')\n", (13741, 13751), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((15057, 15084), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""asinh"""'], {}), "('asinh')\n", (15075, 15084), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((15189, 15218), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""arcsinh"""'], {}), "('arcsinh')\n", (15207, 15218), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((16430, 16455), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sin"""'], {}), "('sin')\n", (16448, 16455), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((16595, 16621), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sin_"""'], {}), "('sin_')\n", (16613, 16621), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((16888, 16913), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""cos"""'], {}), "('cos')\n", (16906, 16913), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((18154, 18180), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""atan"""'], {}), "('atan')\n", (18172, 18180), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((18396, 18424), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""arctan"""'], {}), "('arctan')\n", (18414, 18424), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((20296, 20322), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""fmod"""'], {}), "('fmod')\n", (20314, 20322), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((20579, 20604), 
'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""log"""'], {}), "('log')\n", (20597, 20604), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((21820, 21847), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""rsqrt"""'], {}), "('rsqrt')\n", (21838, 21847), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((22656, 22682), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sqrt"""'], {}), "('sqrt')\n", (22674, 22682), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((23485, 23513), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""square"""'], {}), "('square')\n", (23503, 23513), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((25313, 25338), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""std"""'], {}), "('std')\n", (25331, 25338), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((26886, 26911), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""pow"""'], {}), "('pow')\n", (26904, 26911), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((30996, 31023), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""addmm"""'], {}), "('addmm')\n", (31014, 31023), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((34545, 34572), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""clamp"""'], {}), "('clamp')\n", (34563, 34572), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((34836, 34862), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""clip"""'], {}), "('clip')\n", (34854, 34862), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((35133, 35159), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""cosh"""'], {}), "('cosh')\n", (35151, 35159), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((35961, 35986), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""erf"""'], {}), "('erf')\n", (35979, 35986), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((37423, 37448), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""erf"""'], {}), "('erf')\n", (37441, 37448), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((37774, 37800), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""erfc"""'], {}), "('erfc')\n", (37792, 37800), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((39316, 39342), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""erfc"""'], {}), "('erfc')\n", (39334, 39342), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((40985, 41011), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""ceil"""'], {}), "('ceil')\n", (41003, 41011), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((42672, 42699), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""expm1"""'], {}), "('expm1')\n", (42690, 42699), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((44283, 44309), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""topk"""'], {}), "('topk')\n", (44301, 44309), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((46056, 46092), 'doctest.testmod', 
'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (46071, 46092), False, 'import doctest\n'), ((1232, 1264), 'oneflow.F.mul_scalar', 'flow.F.mul_scalar', (['x', 'self.alpha'], {}), '(x, self.alpha)\n', (1249, 1264), True, 'import oneflow as flow\n'), ((1404, 1437), 'oneflow.F.mul_scalar_by_tensor', 'flow.F.mul_scalar_by_tensor', (['x', 'y'], {}), '(x, y)\n', (1431, 1437), True, 'import oneflow as flow\n'), ((1574, 1590), 'oneflow.F.mul', 'flow.F.mul', (['x', 'y'], {}), '(x, y)\n', (1584, 1590), True, 'import oneflow as flow\n'), ((1725, 1751), 'oneflow.F.broadcast_mul', 'flow.F.broadcast_mul', (['x', 'y'], {}), '(x, y)\n', (1745, 1751), True, 'import oneflow as flow\n'), ((3448, 3482), 'oneflow.nn.modules.utils._check_axis', '_check_axis', (['self.dim', 'input.shape'], {}), '(self.dim, input.shape)\n', (3459, 3482), False, 'from oneflow.nn.modules.utils import _check_axis, _check_inplace_valid\n'), ((5008, 5041), 'oneflow.F.sub_scalar_by_tensor', 'flow.F.sub_scalar_by_tensor', (['x', 'y'], {}), '(x, y)\n', (5035, 5041), True, 'import oneflow as flow\n'), ((5176, 5202), 'oneflow.F.broadcast_sub', 'flow.F.broadcast_sub', (['x', 'y'], {}), '(x, y)\n', (5196, 5202), True, 'import oneflow as flow\n'), ((5623, 5669), 'oneflow.F.add_scalar', 'flow.F.add_scalar', (['x', 'self.alpha', 'self.inplace'], {}), '(x, self.alpha, self.inplace)\n', (5640, 5669), True, 'import oneflow as flow\n'), ((7201, 7227), 'oneflow.F.broadcast_div', 'flow.F.broadcast_div', (['x', 'y'], {}), '(x, y)\n', (7221, 7227), True, 'import oneflow as flow\n'), ((7372, 7410), 'oneflow.F.div_scalar_by_tensor', 'flow.F.div_scalar_by_tensor', (['x', 'scalar'], {}), '(x, scalar)\n', (7399, 7410), True, 'import oneflow as flow\n'), ((9174, 9201), 'oneflow.F.reciprocal_no_nan', 'flow.F.reciprocal_no_nan', (['x'], {}), '(x)\n', (9198, 9201), True, 'import oneflow as flow\n'), ((10015, 10062), 'oneflow.F.add_scalar_by_tensor', 'flow.F.add_scalar_by_tensor', (['x', 'y', 'self.inplace'], {}), '(x, y, self.inplace)\n', (10042, 10062), True, 'import oneflow as flow\n'), ((10314, 10344), 'oneflow.F.add', 'flow.F.add', (['x', 'y', 'self.inplace'], {}), '(x, y, self.inplace)\n', (10324, 10344), True, 'import oneflow as flow\n'), ((10479, 10505), 'oneflow.F.broadcast_add', 'flow.F.broadcast_add', (['x', 'y'], {}), '(x, y)\n', (10499, 10505), True, 'import oneflow as flow\n'), ((12534, 12548), 'oneflow.F.asin', 'flow.F.asin', (['x'], {}), '(x)\n', (12545, 12548), True, 'import oneflow as flow\n'), ((13976, 13991), 'oneflow.F.asinh', 'flow.F.asinh', (['x'], {}), '(x)\n', (13988, 13991), True, 'import oneflow as flow\n'), ((15559, 15586), 'oneflow.F.sin', 'flow.F.sin', (['x', 'self.inplace'], {}), '(x, self.inplace)\n', (15569, 15586), True, 'import oneflow as flow\n'), ((16871, 16884), 'oneflow.F.cos', 'flow.F.cos', (['x'], {}), '(x)\n', (16881, 16884), True, 'import oneflow as flow\n'), ((17577, 17591), 'oneflow.F.atan', 'flow.F.atan', (['x'], {}), '(x)\n', (17588, 17591), True, 'import oneflow as flow\n'), ((19252, 19269), 'oneflow.F.fmod', 'flow.F.fmod', (['x', 'y'], {}), '(x, y)\n', (19263, 19269), True, 'import oneflow as flow\n'), ((20562, 20575), 'oneflow.F.log', 'flow.F.log', (['x'], {}), '(x)\n', (20572, 20575), True, 'import oneflow as flow\n'), ((21798, 21816), 'oneflow.F.sqrt', 'flow.F.sqrt', (['input'], {}), '(input)\n', (21809, 21816), True, 'import oneflow as flow\n'), ((22633, 22652), 'oneflow.F.rsqrt', 'flow.F.rsqrt', (['input'], {}), '(input)\n', (22645, 22652), True, 'import oneflow as flow\n'), 
((23461, 23481), 'oneflow.F.square', 'flow.F.square', (['input'], {}), '(input)\n', (23474, 23481), True, 'import oneflow as flow\n'), ((24602, 24632), 'oneflow.nn.modules.utils._check_axis', '_check_axis', (['self.dim', 'x.shape'], {}), '(self.dim, x.shape)\n', (24613, 24632), False, 'from oneflow.nn.modules.utils import _check_axis, _check_inplace_valid\n'), ((35115, 35129), 'oneflow.F.cosh', 'flow.F.cosh', (['x'], {}), '(x)\n', (35126, 35129), True, 'import oneflow as flow\n'), ((35940, 35957), 'oneflow.F.erf', 'flow.F.erf', (['input'], {}), '(input)\n', (35950, 35957), True, 'import oneflow as flow\n'), ((39563, 39577), 'oneflow.F.ceil', 'flow.F.ceil', (['x'], {}), '(x)\n', (39574, 39577), True, 'import oneflow as flow\n'), ((41225, 41240), 'oneflow.F.expm1', 'flow.F.expm1', (['x'], {}), '(x)\n', (41237, 41240), True, 'import oneflow as flow\n'), ((3556, 3584), 'oneflow.zeros', 'flow.zeros', ([], {'size': 'input.shape'}), '(size=input.shape)\n', (3566, 3584), True, 'import oneflow as flow\n'), ((5584, 5607), 'oneflow.nn.modules.utils._check_inplace_valid', '_check_inplace_valid', (['x'], {}), '(x)\n', (5604, 5607), False, 'from oneflow.nn.modules.utils import _check_axis, _check_inplace_valid\n'), ((8610, 8632), 'oneflow.reciprocal', 'flow.reciprocal', (['other'], {}), '(other)\n', (8625, 8632), True, 'import oneflow as flow\n'), ((9976, 9999), 'oneflow.nn.modules.utils._check_inplace_valid', '_check_inplace_valid', (['x'], {}), '(x)\n', (9996, 9999), False, 'from oneflow.nn.modules.utils import _check_axis, _check_inplace_valid\n'), ((10275, 10298), 'oneflow.nn.modules.utils._check_inplace_valid', '_check_inplace_valid', (['x'], {}), '(x)\n', (10295, 10298), False, 'from oneflow.nn.modules.utils import _check_axis, _check_inplace_valid\n'), ((15520, 15543), 'oneflow.nn.modules.utils._check_inplace_valid', '_check_inplace_valid', (['x'], {}), '(x)\n', (15540, 15543), False, 'from oneflow.nn.modules.utils import _check_axis, _check_inplace_valid\n'), ((18842, 18870), 'oneflow.F.cast', 'flow.F.cast', (['x', 'flow.float32'], {}), '(x, flow.float32)\n', (18853, 18870), True, 'import oneflow as flow\n'), ((18887, 18940), 'oneflow.tensor', 'flow.tensor', (['[y]'], {'dtype': 'flow.float32', 'device': 'x.device'}), '([y], dtype=flow.float32, device=x.device)\n', (18898, 18940), True, 'import oneflow as flow\n'), ((24716, 24740), 'oneflow.zeros', 'flow.zeros', ([], {'size': 'x.shape'}), '(size=x.shape)\n', (24726, 24740), True, 'import oneflow as flow\n'), ((26803, 26832), 'oneflow.F.pow_scalar', 'flow.F.pow_scalar', (['x'], {'alpha': 'y'}), '(x, alpha=y)\n', (26820, 26832), True, 'import oneflow as flow\n'), ((26866, 26882), 'oneflow.F.pow', 'flow.F.pow', (['x', 'y'], {}), '(x, y)\n', (26876, 26882), True, 'import oneflow as flow\n'), ((43817, 43873), 'oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim', 'get_perm_when_transpose_axis_to_last_dim', (['num_axes', 'axis'], {}), '(num_axes, axis)\n', (43857, 43873), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n'), ((43890, 43924), 'oneflow.F.transpose', 'flow.F.transpose', (['input'], {'perm': 'perm'}), '(input, perm=perm)\n', (43906, 43924), True, 'import oneflow as flow\n'), ((43633, 43652), 'oneflow.mul', 'flow.mul', (['input', '(-1)'], {}), '(input, -1)\n', (43641, 43652), True, 'import oneflow as flow\n'), ((43736, 43773), 'oneflow.gather', 'flow.gather', (['input', 'indices'], {'dim': 'axis'}), '(input, indices, dim=axis)\n', (43747, 43773), True, 'import oneflow 
as flow\n'), ((44055, 44070), 'oneflow.mul', 'flow.mul', (['x', '(-1)'], {}), '(x, -1)\n', (44063, 44070), True, 'import oneflow as flow\n'), ((44232, 44269), 'oneflow.gather', 'flow.gather', (['input', 'indices'], {'dim': 'axis'}), '(input, indices, dim=axis)\n', (44243, 44269), True, 'import oneflow as flow\n'), ((3654, 3672), 'oneflow.square', 'flow.square', (['input'], {}), '(input)\n', (3665, 3672), True, 'import oneflow as flow\n'), ((3723, 3759), 'oneflow.mean', 'flow.mean', (['input', 'axis', 'self.keepdim'], {}), '(input, axis, self.keepdim)\n', (3732, 3759), True, 'import oneflow as flow\n'), ((12335, 12360), 'oneflow.broadcast_like', 'flow.broadcast_like', (['y', 'x'], {}), '(y, x)\n', (12354, 12360), True, 'import oneflow as flow\n'), ((19070, 19098), 'oneflow.F.cast', 'flow.F.cast', (['x', 'flow.float32'], {}), '(x, flow.float32)\n', (19081, 19098), True, 'import oneflow as flow\n'), ((19119, 19147), 'oneflow.F.cast', 'flow.F.cast', (['y', 'flow.float32'], {}), '(y, flow.float32)\n', (19130, 19147), True, 'import oneflow as flow\n'), ((25122, 25158), 'oneflow.sum', 'flow.sum', (['x', 'self.axis', 'self.keepdim'], {}), '(x, self.axis, self.keepdim)\n', (25130, 25158), True, 'import oneflow as flow\n'), ((28548, 28573), 'oneflow.F.matmul', 'flow.F.matmul', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (28561, 28573), True, 'import oneflow as flow\n'), ((44187, 44210), 'oneflow.ops.transpose_util.get_inversed_perm', 'get_inversed_perm', (['perm'], {}), '(perm)\n', (44204, 44210), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n'), ((37647, 37670), 'oneflow.builtin_op', 'flow.builtin_op', (['"""erfc"""'], {}), "('erfc')\n", (37662, 37670), True, 'import oneflow as flow\n'), ((42997, 43021), 'oneflow.builtin_op', 'flow.builtin_op', (['"""top_k"""'], {}), "('top_k')\n", (43012, 43021), True, 'import oneflow as flow\n'), ((32096, 32133), 'oneflow.builtin_op', 'flow.builtin_op', (['"""clip_by_scalar_min"""'], {}), "('clip_by_scalar_min')\n", (32111, 32133), True, 'import oneflow as flow\n'), ((32423, 32460), 'oneflow.builtin_op', 'flow.builtin_op', (['"""clip_by_scalar_max"""'], {}), "('clip_by_scalar_max')\n", (32438, 32460), True, 'import oneflow as flow\n'), ((31657, 31690), 'oneflow.builtin_op', 'flow.builtin_op', (['"""clip_by_scalar"""'], {}), "('clip_by_scalar')\n", (31672, 31690), True, 'import oneflow as flow\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from random import randint from random import choice import numpy as np import oneflow as flow from test_xrt import * class TestXrtReLU(flow.unittest.TestCase): def test_xrt_relu(test_case): x = np.random.random((1, 10, 2)).astype(np.float32) x_cpu = flow.tensor(x, dtype=flow.float32, device=flow.device("cpu")) x_cuda = flow.tensor(x, dtype=flow.float32, device=flow.device("cuda")) relu_g = generate_graph(flow.relu) out = relu_g(x_cpu) test_xrt_openvino(test_case, generate_graph(flow.relu), x_cpu, out) test_xrt_tensorrt(test_case, generate_graph(flow.relu), x_cuda, out) test_xrt_xla(test_case, generate_graph(flow.relu), x_cuda, out) if __name__ == "__main__": unittest.main()
[ "oneflow.device" ]
[((1355, 1370), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1368, 1370), False, 'import unittest\n'), ((818, 846), 'numpy.random.random', 'np.random.random', (['(1, 10, 2)'], {}), '((1, 10, 2))\n', (834, 846), True, 'import numpy as np\n'), ((924, 942), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (935, 942), True, 'import oneflow as flow\n'), ((1003, 1022), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1014, 1022), True, 'import oneflow as flow\n')]
from transformer import Transformer import numpy as np import math import sys import oneflow as flow import oneflow.nn as nn sys.path.append("../") TO_CUDA = True def to_cuda(tensor, flag=TO_CUDA, where="cuda"): if flag: return tensor.to(where) else: return tensor class Embeddings(nn.Module): def __init__(self, vocab, d_model): super(Embeddings, self).__init__() self.lut = nn.Embedding(vocab, d_model) self.d_model = d_model def forward(self, x): return self.lut(x) * math.sqrt(self.d_model) class PositionalEncoding(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = flow.zeros((max_len, d_model)) position = flow.arange(0, max_len, dtype=flow.float).unsqueeze(1) div_term = flow.exp( flow.arange(0, d_model, 2).to(flow.float) * (-math.log(10000.0) / d_model) ).unsqueeze(0) pe[:, 0::2] = flow.sin(position * div_term) pe[:, 1::2] = flow.cos(position * div_term) pe = pe.unsqueeze(0).transpose(0, 1) self.pe = flow.nn.Parameter(pe, requires_grad=False) def forward(self, x): x = x + self.pe[: x.size(0), :] return self.dropout(x) class TransformerModel(nn.Module): def __init__( self, input_sz, output_sz, d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout, ): super(TransformerModel, self).__init__() self.transformer = Transformer( d_model=d_model, nhead=nhead, num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers, dim_feedforward=dim_feedforward, dropout=dropout, batch_first=False, ) self.softmax = nn.Softmax(dim=2) self.linear = nn.Linear(d_model, output_sz) self.pos_encoder = PositionalEncoding(d_model, dropout) self.pos_decoder = PositionalEncoding(d_model, dropout) self.src_embedding = Embeddings(input_sz, d_model) self.tgt_embedding = Embeddings(output_sz, d_model) @staticmethod def generate_subsequent_mask(tgt_len, src_len): mask = flow.triu(flow.ones((tgt_len, src_len)), 1) mask = mask.masked_fill(mask.to(flow.int32), float("-inf")) return mask @staticmethod def make_len_mask(inp): inp_mask = (inp.numpy() == 0).astype(np.int32) inp_mask = flow.tensor(inp_mask, dtype=flow.int32) return inp_mask.transpose(0, 1) def forward( self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None, ): if tgt_mask is None: tgt_mask = self.generate_subsequent_mask(tgt.shape[0], tgt.shape[0]) tgt_mask = to_cuda(tgt_mask, where=tgt.device) src_key_padding_mask = self.make_len_mask(src) src_key_padding_mask = to_cuda(src_key_padding_mask, where=tgt.device) tgt_key_padding_mask = None src = self.src_embedding(src) src = self.pos_encoder(src) tgt = self.tgt_embedding(tgt) tgt = self.pos_decoder(tgt) out = self.transformer( src, tgt, src_mask, tgt_mask, memory_mask, src_key_padding_mask, tgt_key_padding_mask, memory_key_padding_mask, ) out = self.linear(out) return out
[ "oneflow.nn.Softmax", "oneflow.nn.Parameter", "oneflow.nn.Linear", "oneflow.nn.Embedding", "oneflow.cos", "oneflow.zeros", "oneflow.arange", "oneflow.sin", "oneflow.nn.Dropout", "oneflow.tensor", "oneflow.ones" ]
[((127, 149), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (142, 149), False, 'import sys\n'), ((427, 455), 'oneflow.nn.Embedding', 'nn.Embedding', (['vocab', 'd_model'], {}), '(vocab, d_model)\n', (439, 455), True, 'import oneflow.nn as nn\n'), ((740, 761), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (750, 761), True, 'import oneflow.nn as nn\n'), ((776, 806), 'oneflow.zeros', 'flow.zeros', (['(max_len, d_model)'], {}), '((max_len, d_model))\n', (786, 806), True, 'import oneflow as flow\n'), ((1042, 1071), 'oneflow.sin', 'flow.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (1050, 1071), True, 'import oneflow as flow\n'), ((1094, 1123), 'oneflow.cos', 'flow.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (1102, 1123), True, 'import oneflow as flow\n'), ((1187, 1229), 'oneflow.nn.Parameter', 'flow.nn.Parameter', (['pe'], {'requires_grad': '(False)'}), '(pe, requires_grad=False)\n', (1204, 1229), True, 'import oneflow as flow\n'), ((1647, 1844), 'transformer.Transformer', 'Transformer', ([], {'d_model': 'd_model', 'nhead': 'nhead', 'num_encoder_layers': 'num_encoder_layers', 'num_decoder_layers': 'num_decoder_layers', 'dim_feedforward': 'dim_feedforward', 'dropout': 'dropout', 'batch_first': '(False)'}), '(d_model=d_model, nhead=nhead, num_encoder_layers=\n num_encoder_layers, num_decoder_layers=num_decoder_layers,\n dim_feedforward=dim_feedforward, dropout=dropout, batch_first=False)\n', (1658, 1844), False, 'from transformer import Transformer\n'), ((1954, 1971), 'oneflow.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (1964, 1971), True, 'import oneflow.nn as nn\n'), ((1994, 2023), 'oneflow.nn.Linear', 'nn.Linear', (['d_model', 'output_sz'], {}), '(d_model, output_sz)\n', (2003, 2023), True, 'import oneflow.nn as nn\n'), ((2610, 2649), 'oneflow.tensor', 'flow.tensor', (['inp_mask'], {'dtype': 'flow.int32'}), '(inp_mask, dtype=flow.int32)\n', (2621, 2649), True, 'import oneflow as flow\n'), ((543, 566), 'math.sqrt', 'math.sqrt', (['self.d_model'], {}), '(self.d_model)\n', (552, 566), False, 'import math\n'), ((2367, 2396), 'oneflow.ones', 'flow.ones', (['(tgt_len, src_len)'], {}), '((tgt_len, src_len))\n', (2376, 2396), True, 'import oneflow as flow\n'), ((826, 867), 'oneflow.arange', 'flow.arange', (['(0)', 'max_len'], {'dtype': 'flow.float'}), '(0, max_len, dtype=flow.float)\n', (837, 867), True, 'import oneflow as flow\n'), ((922, 948), 'oneflow.arange', 'flow.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (933, 948), True, 'import oneflow as flow\n'), ((968, 985), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (976, 985), False, 'import math\n')]
""" Modified from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/registry.py """ import sys import re import fnmatch from collections import defaultdict from copy import deepcopy from tabulate import tabulate import oneflow as flow def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())] class ModelCreator(object): _model_list = defaultdict( set ) # only contain model, and if it has pretrained or not, e.g. {'alexnet': True} # _model_with_module = defaultdict(set) # contain model and its module _model_entrypoints = {} _model_to_module = {} @staticmethod def register_model(fn): mod = sys.modules[fn.__module__] module_name_split = fn.__module__.split(".") module_name = module_name_split[-1] if len(module_name_split) else "" model_name = fn.__name__ ModelCreator._model_entrypoints[model_name] = fn ModelCreator._model_to_module[model_name] = module_name has_pretrained = False if hasattr(mod, "model_urls") and model_name in mod.model_urls: has_pretrained = True if mod.model_urls[model_name] else False ModelCreator._model_list[model_name] = has_pretrained return fn @staticmethod def create_model( model_name: str, pretrained: bool = False, checkpoint: str = None, **kwargs ): if model_name in ModelCreator._model_entrypoints: create_fn = ModelCreator._model_entrypoints[model_name] else: raise RuntimeError("Unknown model (%s)" % model_name) model = create_fn(pretrained=pretrained, **kwargs) if checkpoint is not None: state_dict = flow.load(checkpoint) model.load_state_dict(state_dict) return model @staticmethod def model_table(filter="", pretrained=False, **kwargs): all_models = ModelCreator._model_entrypoints.keys() if filter: models = [] include_filters = filter if isinstance(filter, (tuple, list)) else [filter] for f in include_filters: include_models = fnmatch.filter(all_models, f) if len(include_models): models = set(models).union(include_models) else: models = all_models show_dict = {} sorted_model = list(sorted(models)) if pretrained: for model in sorted_model: if ModelCreator._model_list[model]: show_dict[model] = ModelCreator._model_list[model] else: for model in sorted_model: show_dict[model] = ModelCreator._model_list[model] table_headers = ["Supported Models", "Pretrained"] table_items = [ (k, "true" if show_dict[k] else "false") for k in show_dict.keys() ] table = tabulate( table_items, headers=table_headers, tablefmt="fancy_grid", **kwargs ) return table @staticmethod def model_list(filter="", pretrained=False, **kwargs): all_models = ModelCreator._model_entrypoints.keys() if filter: models = [] include_filters = filter if isinstance(filter, (tuple, list)) else [filter] for f in include_filters: include_models = fnmatch.filter(all_models, f) if len(include_models): models = set(models).union(include_models) else: models = all_models sorted_model = list(sorted(models)) if pretrained: for model in sorted_model: if not ModelCreator._model_list[model]: sorted_model.remove(model) return sorted_model def __repr__(self) -> str: all_model_table = ModelCreator.model_table("") return "Registry of all models:\n" + all_model_table __str__ = __repr__
[ "oneflow.load" ]
[((424, 440), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (435, 440), False, 'from collections import defaultdict\n'), ((2929, 3006), 'tabulate.tabulate', 'tabulate', (['table_items'], {'headers': 'table_headers', 'tablefmt': '"""fancy_grid"""'}), "(table_items, headers=table_headers, tablefmt='fancy_grid', **kwargs)\n", (2937, 3006), False, 'from tabulate import tabulate\n'), ((1758, 1779), 'oneflow.load', 'flow.load', (['checkpoint'], {}), '(checkpoint)\n', (1767, 1779), True, 'import oneflow as flow\n'), ((2188, 2217), 'fnmatch.filter', 'fnmatch.filter', (['all_models', 'f'], {}), '(all_models, f)\n', (2202, 2217), False, 'import fnmatch\n'), ((3390, 3419), 'fnmatch.filter', 'fnmatch.filter', (['all_models', 'f'], {}), '(all_models, f)\n', (3404, 3419), False, 'import fnmatch\n')]
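A registration round-trip clarifies the intended flow. The sketch below is hypothetical: `toy_net` is a stand-in entrypoint, and the module-level `model_urls` dict is what `register_model` probes to decide the pretrained flag.

# Hypothetical registry usage; `toy_net` and `model_urls` are stand-ins.
import oneflow.nn as nn

model_urls = {"toy_net": None}  # no published weights -> pretrained stays False

@ModelCreator.register_model
def toy_net(pretrained=False, **kwargs):
    return nn.Linear(8, 2)

net = ModelCreator.create_model("toy_net")
print(ModelCreator.model_list(filter="toy*"))   # ['toy_net']
print(ModelCreator.model_table(filter="toy*"))  # fancy-grid table of matches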
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import oneflow.python.framework.c_api_util as c_api_util import oneflow.core.job.placement_pb2 as placement_pb import functools class Symbol(object): def __init__(self, symbol_id, data): self.symbol_id_ = symbol_id self.data_ = data @property def symbol_id(self): return self.symbol_id_ @property def data(self): return self.data_ class ParallelDescSymbol(Symbol): def __init__(self, symbol_id, parallel_conf): Symbol.__init__(self, symbol_id, parallel_conf) self.device_tag_ = parallel_conf.device_tag() self.machine_id2device_id_list_ = MakeMachineId2DeviceIdList(parallel_conf) sub_parallel_nums = [len(v) for k, v in self.machine_id2device_id_list_.items()] self.parallel_num_ = functools.reduce(lambda a, b: a + b, sub_parallel_nums, 0) self.hash_ = hash(self.device_tag_) ^ hash(str(self.machine_id2device_id_list_)) def __hash__(self): return self.hash_ def __eq__(lhs, rhs): return ( lhs.device_tag_ == rhs.device_tag_ and lhs.machine_id2device_id_list_ == rhs.machine_id2device_id_list_ ) def __str__(self): return str(self.parallel_conf) @property def parallel_conf(self): return self.data @property def parallel_num(self): return self.parallel_num_ @property def device_tag(self): return self.device_tag_ @property def machine_id2device_id_list(self): return self.machine_id2device_id_list_ def Containing(self, other): if self.device_tag != other.device_tag: return False return _GlobalDeviceIdsContaining( self.machine_id2device_id_list, other.machine_id2device_id_list, ) def _GlobalDeviceIdsContaining(bigger, smaller): for machine_id, device_ids in smaller.items(): if machine_id not in bigger: return False bigger_device_ids = bigger[machine_id] for device_id in device_ids: if device_id not in bigger_device_ids: return False return True def MakeMachineId2DeviceIdList(parallel_conf): parallel_conf_str = str(parallel_conf) global _parallel_conf_str2ofrecord if parallel_conf_str not in _parallel_conf_str2ofrecord: ofrecord = c_api_util.GetMachine2DeviceIdListOFRecordFromParallelConf( parallel_conf ) _parallel_conf_str2ofrecord[parallel_conf_str] = { int(k): list(v.int32_list.value) for k, v in ofrecord.feature.items() } return _parallel_conf_str2ofrecord[parallel_conf_str] _parallel_conf_str2ofrecord = {}
[ "oneflow.python.framework.c_api_util.GetMachine2DeviceIdListOFRecordFromParallelConf" ]
[((1415, 1473), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a + b)', 'sub_parallel_nums', '(0)'], {}), '(lambda a, b: a + b, sub_parallel_nums, 0)\n', (1431, 1473), False, 'import functools\n'), ((2973, 3046), 'oneflow.python.framework.c_api_util.GetMachine2DeviceIdListOFRecordFromParallelConf', 'c_api_util.GetMachine2DeviceIdListOFRecordFromParallelConf', (['parallel_conf'], {}), '(parallel_conf)\n', (3031, 3046), True, 'import oneflow.python.framework.c_api_util as c_api_util\n')]
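The containment rule is the part worth pinning down: one placement contains another when every machine's device set in the smaller placement is present in the bigger one. A plain-dict check against the module's helper (no OneFlow runtime needed) makes the semantics explicit:

# machine_id -> device id list, the same shape MakeMachineId2DeviceIdList produces.
bigger = {0: [0, 1, 2, 3], 1: [0, 1]}
smaller = {0: [1, 3]}
print(_GlobalDeviceIdsContaining(bigger, smaller))  # True: {1, 3} is within machine 0
print(_GlobalDeviceIdsContaining(smaller, bigger))  # False: machine 1 is missing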
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb import oneflow.core.job.mirrored_parallel_pb2 as mirrored_parallel_pb from oneflow.python.framework.dtype import convert_proto_dtype_to_oneflow_dtype class OpArgBlobAttribute(object): def __init__(self, batch_axis, blob_desc, logical_blob_name): self.batch_axis_ = batch_axis self.blob_desc_ = blob_desc self.shape_ = tuple(self.blob_desc_.body.shape.dim) self.logical_blob_name_ = logical_blob_name def __eq__(self, rhs): return ( self.shape == rhs.shape and self.dtype == rhs.dtype and self.batch_axis == rhs.batch_axis and self.is_tensor_list == rhs.is_tensor_list and self.is_dynamic == rhs.is_dynamic and self.logical_blob_name == rhs.logical_blob_name ) @property def shape(self): return self.shape_ @property def dtype(self): return convert_proto_dtype_to_oneflow_dtype(self.blob_desc_.body.data_type) @property def batch_axis(self): return self.batch_axis_ @property def is_tensor_list(self): return self.blob_desc_.is_tensor_list @property def is_dynamic(self): return self.blob_desc_.is_dynamic @property def logical_blob_name(self): return self.logical_blob_name_ def DumpToOpNodeSignature(self, bn_in_op, op_node_signature): blob_sig = op_node_signature.logical_blob_desc_signature.bn_in_op2blob_desc assert bn_in_op not in blob_sig blob_sig[bn_in_op].CopyFrom(self.blob_desc_) batch_axis_sig = op_node_signature.batch_axis_signature.bn_in_op2batch_axis assert bn_in_op not in batch_axis_sig batch_axis_sig[bn_in_op].CopyFrom(self.batch_axis_) def DumpToToInterfaceBlobConf(self, interface_blob_conf): interface_blob_conf.shape.dim.extend(self.shape) interface_blob_conf.data_type = self.blob_desc_.body.data_type interface_blob_conf.is_dynamic = self.is_dynamic interface_blob_conf.is_tensor_list = self.is_tensor_list interface_blob_conf.batch_axis.CopyFrom(self.batch_axis) class OpArgParallelAttribute(object): def __init__(self, parallel_desc_symbol, sbp_parallel, opt_mirrored_parallel): self.parallel_desc_symbol_ = parallel_desc_symbol self.sbp_parallel_ = sbp_parallel self.opt_mirrored_parallel_ = opt_mirrored_parallel self.hash_ = self._Hash() @property def parallel_desc_symbol(self): return self.parallel_desc_symbol_ @property def sbp_parallel(self): return self.sbp_parallel_ @property def opt_mirrored_parallel(self): return self.opt_mirrored_parallel_ def is_mirrored(self): return self.opt_mirrored_parallel.HasField("mirrored_parallel") def Assign(self, other): self.__init__( other.parallel_desc_symbol, other.sbp_parallel, other.opt_mirrored_parallel ) def DumpToOpNodeSignature(self, bn_in_op, op_node_signature): sbp_sig = op_node_signature.sbp_signature.bn_in_op2sbp_parallel assert bn_in_op not in sbp_sig sbp_sig[bn_in_op].CopyFrom(self.sbp_parallel) mirrored_sig = ( op_node_signature.mirrored_signature.bn_in_op2opt_mirrored_parallel ) assert bn_in_op not in mirrored_sig mirrored_sig[bn_in_op].CopyFrom(self.opt_mirrored_parallel) 
parallel_sig = ( op_node_signature.parallel_signature.bn_in_op2parallel_desc_symbol_id ) assert bn_in_op not in parallel_sig parallel_sig[bn_in_op] = self.parallel_desc_symbol.symbol_id def DumpToToInterfaceBlobConf(self, interface_blob_conf): if self.sbp_parallel.HasField("split_parallel"): interface_blob_conf.split_axis.value = self.sbp_parallel.split_parallel.axis else: interface_blob_conf.ClearField("split_axis") def __hash__(self): return self.hash_ def __eq__(self, other): return ( self.parallel_desc_symbol_ == other.parallel_desc_symbol_ and self.opt_mirrored_parallel_ == other.opt_mirrored_parallel_ and ( self.opt_mirrored_parallel_.HasField("mirrored_parallel") or self.sbp_parallel_ == other.sbp_parallel_ ) ) def __str__(self): return ( "\nparallel_desc_symbol: %s\nsbp_parallel: %s\nopt_mirrored_parallel: %s\n" % ( self.parallel_desc_symbol.parallel_conf, self.sbp_parallel, self.opt_mirrored_parallel, ) ) def _Hash(self): if self.opt_mirrored_parallel_.HasField("mirrored_parallel"): sbp_hash = 0 else: sbp_hash = hash(str(self.sbp_parallel_)) return ( hash(self.parallel_desc_symbol_) ^ hash(str(self.opt_mirrored_parallel_)) ^ sbp_hash ) def GetOpArgBlobAttribute(op_attribute, bn_in_op): if not op_attribute.HasField("batch_axis_signature"): return None if not op_attribute.HasField("logical_blob_desc_signature"): return None batch_axis_signature_map = op_attribute.batch_axis_signature.bn_in_op2batch_axis blob_desc_signature_map = ( op_attribute.logical_blob_desc_signature.bn_in_op2blob_desc ) arg_signature_map = op_attribute.arg_signature.bn_in_op2lbi lbi = arg_signature_map[bn_in_op] return OpArgBlobAttribute( batch_axis=batch_axis_signature_map[bn_in_op], blob_desc=blob_desc_signature_map[bn_in_op], logical_blob_name="%s/%s" % (lbi.op_name, lbi.blob_name), ) def GetOpArgParallelAttribute(parallel_desc_symbol, op_attribute, bn_in_op): sbp_signature_map = op_attribute.sbp_signature.bn_in_op2sbp_parallel mirrored_signature_map = ( op_attribute.mirrored_signature.bn_in_op2opt_mirrored_parallel ) return OpArgParallelAttribute( parallel_desc_symbol=parallel_desc_symbol, sbp_parallel=sbp_signature_map[bn_in_op], opt_mirrored_parallel=mirrored_signature_map[bn_in_op], ) def MakeMirroredOpArgParallelAttribute(parallel_desc_symbol): sbp_parallel = sbp_parallel_pb.SbpParallel() opt_mirrored_parallel = mirrored_parallel_pb.OptMirroredParallel() opt_mirrored_parallel.mirrored_parallel.SetInParent() return OpArgParallelAttribute( parallel_desc_symbol=parallel_desc_symbol, sbp_parallel=sbp_parallel, opt_mirrored_parallel=opt_mirrored_parallel, ) def MakeBroadcastOpArgParallelAttribute(parallel_desc_symbol): sbp_parallel = sbp_parallel_pb.SbpParallel() sbp_parallel.broadcast_parallel.SetInParent() opt_mirrored_parallel = mirrored_parallel_pb.OptMirroredParallel() return OpArgParallelAttribute( parallel_desc_symbol=parallel_desc_symbol, sbp_parallel=sbp_parallel, opt_mirrored_parallel=opt_mirrored_parallel, )
[ "oneflow.core.job.sbp_parallel_pb2.SbpParallel", "oneflow.python.framework.dtype.convert_proto_dtype_to_oneflow_dtype", "oneflow.core.job.mirrored_parallel_pb2.OptMirroredParallel" ]
[((6939, 6968), 'oneflow.core.job.sbp_parallel_pb2.SbpParallel', 'sbp_parallel_pb.SbpParallel', ([], {}), '()\n', (6966, 6968), True, 'import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb\n'), ((6997, 7039), 'oneflow.core.job.mirrored_parallel_pb2.OptMirroredParallel', 'mirrored_parallel_pb.OptMirroredParallel', ([], {}), '()\n', (7037, 7039), True, 'import oneflow.core.job.mirrored_parallel_pb2 as mirrored_parallel_pb\n'), ((7362, 7391), 'oneflow.core.job.sbp_parallel_pb2.SbpParallel', 'sbp_parallel_pb.SbpParallel', ([], {}), '()\n', (7389, 7391), True, 'import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb\n'), ((7470, 7512), 'oneflow.core.job.mirrored_parallel_pb2.OptMirroredParallel', 'mirrored_parallel_pb.OptMirroredParallel', ([], {}), '()\n', (7510, 7512), True, 'import oneflow.core.job.mirrored_parallel_pb2 as mirrored_parallel_pb\n'), ((1595, 1663), 'oneflow.python.framework.dtype.convert_proto_dtype_to_oneflow_dtype', 'convert_proto_dtype_to_oneflow_dtype', (['self.blob_desc_.body.data_type'], {}), '(self.blob_desc_.body.data_type)\n', (1631, 1663), False, 'from oneflow.python.framework.dtype import convert_proto_dtype_to_oneflow_dtype\n')]
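The two factory helpers at the bottom differ only in which protobuf signature they populate. A short sketch shows the observable difference; it uses a plain string where a real ParallelDescSymbol would go, which is an assumption that works here only because construction merely hashes the symbol.

# Sketch only: the string "gpu:0" stands in for a ParallelDescSymbol.
mirrored = MakeMirroredOpArgParallelAttribute(parallel_desc_symbol="gpu:0")
broadcast = MakeBroadcastOpArgParallelAttribute(parallel_desc_symbol="gpu:0")
print(mirrored.is_mirrored())    # True: mirrored_parallel field is set
print(broadcast.is_mirrored())   # False: broadcast_parallel is set instead
print(broadcast.sbp_parallel.HasField("broadcast_parallel"))  # True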
import os
import pickle

import numpy as np
import soundfile as sf
import librosa
import oneflow as flow

from model.model import Generator, Discriminator
from utils.dataset import VCDataset
import utils.data_utils as preprocess


class MaskCycleGANVCTrainer(object):
    """Trainer for MaskCycleGAN-VC."""

    def __init__(self, args):
        """
        Args:
            args (Namespace): Program arguments from argparser
        """
        # Store args
        self.num_epochs = args.num_epochs
        self.start_epoch = args.start_epoch
        self.generator_lr = args.generator_lr
        self.discriminator_lr = args.discriminator_lr
        self.decay_after = args.decay_after
        self.mini_batch_size = args.batch_size
        self.cycle_loss_lambda = args.cycle_loss_lambda
        self.identity_loss_lambda = args.identity_loss_lambda
        self.device = args.device
        self.epochs_per_save = args.epochs_per_save
        self.sample_rate = args.sample_rate
        self.validation_A_dir = os.path.join(args.origin_data_dir, args.speaker_A_id)
        self.output_A_dir = os.path.join(args.output_data_dir, args.speaker_A_id)
        self.validation_B_dir = os.path.join(args.origin_data_dir, args.speaker_B_id)
        self.output_B_dir = os.path.join(args.output_data_dir, args.speaker_B_id)
        self.infer_data_dir = args.infer_data_dir
        self.pretrain_models = args.pretrain_models

        # Initialize speaker A's dataset
        self.dataset_A = self.loadPickleFile(
            os.path.join(
                args.preprocessed_data_dir,
                args.speaker_A_id,
                f"{args.speaker_A_id}_normalized.pickle",
            )
        )
        dataset_A_norm_stats = np.load(
            os.path.join(
                args.preprocessed_data_dir,
                args.speaker_A_id,
                f"{args.speaker_A_id}_norm_stat.npz",
            )
        )
        self.dataset_A_mean = dataset_A_norm_stats["mean"]
        self.dataset_A_std = dataset_A_norm_stats["std"]

        # Initialize speaker B's dataset
        self.dataset_B = self.loadPickleFile(
            os.path.join(
                args.preprocessed_data_dir,
                args.speaker_B_id,
                f"{args.speaker_B_id}_normalized.pickle",
            )
        )
        dataset_B_norm_stats = np.load(
            os.path.join(
                args.preprocessed_data_dir,
                args.speaker_B_id,
                f"{args.speaker_B_id}_norm_stat.npz",
            )
        )
        self.dataset_B_mean = dataset_B_norm_stats["mean"]
        self.dataset_B_std = dataset_B_norm_stats["std"]

        # Compute lr decay rate
        self.n_samples = len(self.dataset_A)
        print(f"n_samples = {self.n_samples}")
        self.generator_lr_decay = self.generator_lr / float(
            self.num_epochs * (self.n_samples // self.mini_batch_size)
        )
        self.discriminator_lr_decay = self.discriminator_lr / float(
            self.num_epochs * (self.n_samples // self.mini_batch_size)
        )
        print(f"generator_lr_decay = {self.generator_lr_decay}")
        print(f"discriminator_lr_decay = {self.discriminator_lr_decay}")

        # Initialize train dataloader
        self.num_frames = args.num_frames
        self.dataset = VCDataset(
            datasetA=self.dataset_A,
            datasetB=self.dataset_B,
            n_frames=args.num_frames,
            max_mask_len=args.max_mask_len,
        )
        self.train_dataloader = flow.utils.data.DataLoader(
            dataset=self.dataset,
            batch_size=self.mini_batch_size,
            shuffle=True,
            drop_last=False,
        )

        # Initialize generators and discriminators
        self.generator_A2B = Generator().to(self.device)
        self.generator_B2A = Generator().to(self.device)
        self.discriminator_A = Discriminator().to(self.device)
        self.discriminator_B = Discriminator().to(self.device)
        # Discriminators used to compute the two-step adversarial loss
        self.discriminator_A2 = Discriminator().to(self.device)
        self.discriminator_B2 = Discriminator().to(self.device)

        # Initialize optimizers
        g_params = list(self.generator_A2B.parameters()) + list(
            self.generator_B2A.parameters()
        )
        d_params = (
            list(self.discriminator_A.parameters())
            + list(self.discriminator_B.parameters())
            + list(self.discriminator_A2.parameters())
            + list(self.discriminator_B2.parameters())
        )
        self.generator_optimizer = flow.optim.Adam(
            g_params, lr=self.generator_lr, betas=(0.5, 0.999)
        )
        self.discriminator_optimizer = flow.optim.Adam(
            d_params, lr=self.discriminator_lr, betas=(0.5, 0.999)
        )

    def adjust_lr_rate(self, optimizer, generator):
        """Decays learning rate.

        Args:
            optimizer (flow.optim.Optimizer): optimizer whose learning rate is decayed
            generator (bool): Whether to adjust the generator lr.
        """
        if generator:
            self.generator_lr = max(0.0, self.generator_lr - self.generator_lr_decay)
            for param_groups in optimizer.param_groups:
                param_groups["lr"] = self.generator_lr
        else:
            self.discriminator_lr = max(
                0.0, self.discriminator_lr - self.discriminator_lr_decay
            )
            for param_groups in optimizer.param_groups:
                param_groups["lr"] = self.discriminator_lr

    def reset_grad(self):
        """Sets gradients of the generators and discriminators to zero before
        backpropagation.
        """
        self.generator_optimizer.zero_grad()
        self.discriminator_optimizer.zero_grad()

    def loadPickleFile(self, fileName):
        """Loads a pickle file.

        Args:
            fileName (str): pickle file path

        Returns:
            object: the unpickled object
        """
        with open(fileName, "rb") as f:
            return pickle.load(f)

    def train(self):
        """Implements the training loop for MaskCycleGAN-VC."""
        for epoch in range(self.start_epoch, self.num_epochs + 1):
            for i, (real_A, mask_A, real_B, mask_B) in enumerate(self.train_dataloader):
                num_iterations = (self.n_samples // self.mini_batch_size) * epoch + i
                if num_iterations > 10000:
                    self.identity_loss_lambda = 0
                if num_iterations > self.decay_after:
                    self.adjust_lr_rate(self.generator_optimizer, generator=True)
                    # decay the discriminator optimizer's lr with its own schedule
                    self.adjust_lr_rate(self.discriminator_optimizer, generator=False)

                real_A = real_A.to(self.device, dtype=flow.float)
                mask_A = mask_A.to(self.device, dtype=flow.float)
                real_B = real_B.to(self.device, dtype=flow.float)
                mask_B = mask_B.to(self.device, dtype=flow.float)

                # Train generator
                self.generator_A2B.train()
                self.generator_B2A.train()
                self.discriminator_A.eval()
                self.discriminator_B.eval()
                self.discriminator_A2.eval()
                self.discriminator_B2.eval()

                # Generator feed forward
                fake_B = self.generator_A2B(real_A, mask_A)
                cycle_A = self.generator_B2A(fake_B, flow.ones_like(fake_B))
                fake_A = self.generator_B2A(real_B, mask_B)
                cycle_B = self.generator_A2B(fake_A, flow.ones_like(fake_A))
                identity_A = self.generator_B2A(real_A, flow.ones_like(real_A))
                identity_B = self.generator_A2B(real_B, flow.ones_like(real_B))
                d_fake_A = self.discriminator_A(fake_A)
                d_fake_B = self.discriminator_B(fake_B)

                # For two-step adversarial loss
                d_fake_cycle_A = self.discriminator_A2(cycle_A)
                d_fake_cycle_B = self.discriminator_B2(cycle_B)

                # Generator cycle loss
                cycleLoss = flow.mean(flow.abs(real_A - cycle_A)) + flow.mean(
                    flow.abs(real_B - cycle_B)
                )

                # Generator identity loss
                identityLoss = flow.mean(flow.abs(real_A - identity_A)) + flow.mean(
                    flow.abs(real_B - identity_B)
                )

                # Generator loss
                g_loss_A2B = flow.mean((1 - d_fake_B) ** 2)
                g_loss_B2A = flow.mean((1 - d_fake_A) ** 2)

                # Generator two-step adversarial loss
                generator_loss_A2B_2nd = flow.mean((1 - d_fake_cycle_B) ** 2)
                generator_loss_B2A_2nd = flow.mean((1 - d_fake_cycle_A) ** 2)

                # Total generator loss
                g_loss = (
                    g_loss_A2B
                    + g_loss_B2A
                    + generator_loss_A2B_2nd
                    + generator_loss_B2A_2nd
                    + self.cycle_loss_lambda * cycleLoss
                    + self.identity_loss_lambda * identityLoss
                )

                # Backprop for generator
                self.reset_grad()
                g_loss.backward()
                self.generator_optimizer.step()

                # Train discriminator
                self.generator_A2B.eval()
                self.generator_B2A.eval()
                self.discriminator_A.train()
                self.discriminator_B.train()
                self.discriminator_A2.train()
                self.discriminator_B2.train()

                # Discriminator feed forward
                d_real_A = self.discriminator_A(real_A)
                d_real_B = self.discriminator_B(real_B)
                d_real_A2 = self.discriminator_A2(real_A)
                d_real_B2 = self.discriminator_B2(real_B)
                generated_A = self.generator_B2A(real_B, mask_B)
                d_fake_A = self.discriminator_A(generated_A)

                # For two-step adversarial loss A->B
                cycled_B = self.generator_A2B(generated_A, flow.ones_like(generated_A))
                d_cycled_B = self.discriminator_B2(cycled_B)

                generated_B = self.generator_A2B(real_A, mask_A)
                d_fake_B = self.discriminator_B(generated_B)

                # For two-step adversarial loss B->A
                cycled_A = self.generator_B2A(generated_B, flow.ones_like(generated_B))
                d_cycled_A = self.discriminator_A2(cycled_A)

                # Loss functions
                d_loss_A_real = flow.mean((1 - d_real_A) ** 2)
                d_loss_A_fake = flow.mean((0 - d_fake_A) ** 2)
                d_loss_A = (d_loss_A_real + d_loss_A_fake) / 2.0

                d_loss_B_real = flow.mean((1 - d_real_B) ** 2)
                d_loss_B_fake = flow.mean((0 - d_fake_B) ** 2)
                d_loss_B = (d_loss_B_real + d_loss_B_fake) / 2.0

                # Two-step adversarial loss
                d_loss_A_cycled = flow.mean((0 - d_cycled_A) ** 2)
                d_loss_B_cycled = flow.mean((0 - d_cycled_B) ** 2)
                d_loss_A2_real = flow.mean((1 - d_real_A2) ** 2)
                d_loss_B2_real = flow.mean((1 - d_real_B2) ** 2)
                d_loss_A_2nd = (d_loss_A2_real + d_loss_A_cycled) / 2.0
                d_loss_B_2nd = (d_loss_B2_real + d_loss_B_cycled) / 2.0

                # Final discriminator loss, including the two-step adversarial loss
                d_loss = (d_loss_A + d_loss_B) / 2.0 + (
                    d_loss_A_2nd + d_loss_B_2nd
                ) / 2.0

                # Backprop for discriminator
                self.reset_grad()
                d_loss.backward()
                self.discriminator_optimizer.step()

                if (i + 1) % 2 == 0:
                    print(
                        "Iter:{} Generator Loss:{:.4f} Discriminator Loss:{:.4f} GA2B:{:.4f} GB2A:{:.4f} G_id:{:.4f} G_cyc:{:.4f} D_A:{:.4f} D_B:{:.4f}".format(
                            num_iterations,
                            g_loss.item(),
                            d_loss.item(),
                            g_loss_A2B,
                            g_loss_B2A,
                            identityLoss,
                            cycleLoss,
                            d_loss_A,
                            d_loss_B,
                        )
                    )

            # Save each model checkpoint and run validation
            if epoch % self.epochs_per_save == 0 and epoch != 0:
                self.saveModelCheckPoint(epoch, PATH="model_checkpoint")
                self.validation_for_A_dir()
                self.validation_for_B_dir()

    def infer(self):
        """Implements the inference loop for MaskCycleGAN-VC."""
        # Load pretrained models
        self.loadModel(self.pretrain_models)
        num_mcep = 80
        sampling_rate = self.sample_rate
        frame_period = 5.0
        infer_A_dir = self.infer_data_dir

        print("Generating Validation Data B from A...")
        for file in os.listdir(infer_A_dir):
            filePath = os.path.join(infer_A_dir, file)
            wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)
            wav = preprocess.wav_padding(
                wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4
            )
            f0, timeaxis, sp, ap = preprocess.world_decompose(
                wav=wav, fs=sampling_rate, frame_period=frame_period
            )
            f0_converted = preprocess.pitch_conversion(
                f0=f0,
                mean_log_src=self.dataset_A_mean,
                std_log_src=self.dataset_A_std,
                mean_log_target=self.dataset_B_mean,
                std_log_target=self.dataset_B_std,
            )
            coded_sp = preprocess.world_encode_spectral_envelop(
                sp=sp, fs=sampling_rate, dim=num_mcep
            )
            coded_sp_transposed = coded_sp.T
            coded_sp_norm = (
                coded_sp_transposed - self.dataset_A_mean
            ) / self.dataset_A_std
            coded_sp_norm = np.array([coded_sp_norm])
            if flow.cuda.is_available():
                coded_sp_norm = flow.tensor(coded_sp_norm).cuda().float()
            else:
                coded_sp_norm = flow.tensor(coded_sp_norm).float()
            coded_sp_converted_norm = self.generator_A2B(
                coded_sp_norm, flow.ones_like(coded_sp_norm)
            )
            coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()
            coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)
            coded_sp_converted = (
                coded_sp_converted_norm * self.dataset_B_std + self.dataset_B_mean
            )
            coded_sp_converted = coded_sp_converted.T
            coded_sp_converted = np.ascontiguousarray(coded_sp_converted).astype(
                np.double
            )
            decoded_sp_converted = preprocess.world_decode_spectral_envelop(
                coded_sp=coded_sp_converted, fs=sampling_rate
            )
            wav_transformed = preprocess.world_speech_synthesis(
                f0=f0_converted[0],
                decoded_sp=decoded_sp_converted,
                ap=ap,
                fs=sampling_rate,
                frame_period=frame_period,
            )
            sf.write(
                os.path.join(infer_A_dir, "convert_" + os.path.basename(file)),
                wav_transformed,
                sampling_rate,
            )

    def validation_for_A_dir(self):
        num_mcep = 80
        sampling_rate = 22050
        frame_period = 5.0
        validation_A_dir = self.validation_A_dir
        output_A_dir = self.output_A_dir
        os.makedirs(output_A_dir, exist_ok=True)

        print("Generating Validation Data B from A...")
        for file in os.listdir(validation_A_dir):
            filePath = os.path.join(validation_A_dir, file)
            wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)
            wav = preprocess.wav_padding(
                wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4
            )
            f0, timeaxis, sp, ap = preprocess.world_decompose(
                wav=wav, fs=sampling_rate, frame_period=frame_period
            )
            f0_converted = preprocess.pitch_conversion(
                f0=f0,
                mean_log_src=self.dataset_A_mean,
                std_log_src=self.dataset_A_std,
                mean_log_target=self.dataset_B_mean,
                std_log_target=self.dataset_B_std,
            )
            coded_sp = preprocess.world_encode_spectral_envelop(
                sp=sp, fs=sampling_rate, dim=num_mcep
            )
            coded_sp_transposed = coded_sp.T
            coded_sp_norm = (
                coded_sp_transposed - self.dataset_A_mean
            ) / self.dataset_A_std
            coded_sp_norm = np.array([coded_sp_norm])
            if flow.cuda.is_available():
                coded_sp_norm = flow.tensor(coded_sp_norm).cuda().float()
            else:
                coded_sp_norm = flow.tensor(coded_sp_norm).float()
            coded_sp_converted_norm = self.generator_A2B(
                coded_sp_norm, flow.ones_like(coded_sp_norm)
            )
            coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()
            coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)
            coded_sp_converted = (
                coded_sp_converted_norm * self.dataset_B_std + self.dataset_B_mean
            )
            coded_sp_converted = coded_sp_converted.T
            coded_sp_converted = np.ascontiguousarray(coded_sp_converted).astype(
                np.double
            )
            decoded_sp_converted = preprocess.world_decode_spectral_envelop(
                coded_sp=coded_sp_converted, fs=sampling_rate
            )
            wav_transformed = preprocess.world_speech_synthesis(
                f0=f0_converted[0],
                decoded_sp=decoded_sp_converted,
                ap=ap,
                fs=sampling_rate,
                frame_period=frame_period,
            )
            sf.write(
                os.path.join(output_A_dir, "convert_" + os.path.basename(file)),
                wav_transformed,
                sampling_rate,
            )

    def validation_for_B_dir(self):
        num_mcep = 80
        sampling_rate = 22050
        frame_period = 5.0
        validation_B_dir = self.validation_B_dir
        output_B_dir = self.output_B_dir
        os.makedirs(output_B_dir, exist_ok=True)

        print("Generating Validation Data A from B...")
        for file in os.listdir(validation_B_dir):
            filePath = os.path.join(validation_B_dir, file)
            wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)
            wav = preprocess.wav_padding(
                wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4
            )
            f0, timeaxis, sp, ap = preprocess.world_decompose(
                wav=wav, fs=sampling_rate, frame_period=frame_period
            )
            f0_converted = preprocess.pitch_conversion(
                f0=f0,
                mean_log_src=self.dataset_B_mean,
                std_log_src=self.dataset_B_std,
                mean_log_target=self.dataset_A_mean,
                std_log_target=self.dataset_A_std,
            )
            coded_sp = preprocess.world_encode_spectral_envelop(
                sp=sp, fs=sampling_rate, dim=num_mcep
            )
            coded_sp_transposed = coded_sp.T
            coded_sp_norm = (
                coded_sp_transposed - self.dataset_B_mean
            ) / self.dataset_B_std
            coded_sp_norm = np.array([coded_sp_norm])
            if flow.cuda.is_available():
                coded_sp_norm = flow.tensor(coded_sp_norm).cuda().float()
            else:
                coded_sp_norm = flow.tensor(coded_sp_norm).float()
            coded_sp_converted_norm = self.generator_B2A(
                coded_sp_norm, flow.ones_like(coded_sp_norm)
            )
            coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()
            coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)
            coded_sp_converted = (
                coded_sp_converted_norm * self.dataset_A_std + self.dataset_A_mean
            )
            coded_sp_converted = coded_sp_converted.T
            coded_sp_converted = np.ascontiguousarray(coded_sp_converted).astype(
                np.double
            )
            decoded_sp_converted = preprocess.world_decode_spectral_envelop(
                coded_sp=coded_sp_converted, fs=sampling_rate
            )
            wav_transformed = preprocess.world_speech_synthesis(
                f0=f0_converted[0],
                decoded_sp=decoded_sp_converted,
                ap=ap,
                fs=sampling_rate,
                frame_period=frame_period,
            )
            sf.write(
                os.path.join(output_B_dir, "convert_" + os.path.basename(file)),
                wav_transformed,
                sampling_rate,
            )

    def saveModelCheckPoint(self, epoch, PATH):
        flow.save(
            self.generator_A2B.state_dict(),
            os.path.join(PATH, "generator_A2B_%d" % epoch),
        )
        flow.save(
            self.generator_B2A.state_dict(),
            os.path.join(PATH, "generator_B2A_%d" % epoch),
        )
        flow.save(
            self.discriminator_A.state_dict(),
            os.path.join(PATH, "discriminator_A_%d" % epoch),
        )
        flow.save(
            self.discriminator_B.state_dict(),
            os.path.join(PATH, "discriminator_B_%d" % epoch),
        )

    def loadModel(self, PATH):
        self.generator_A2B.load_state_dict(
            flow.load(os.path.join(PATH, "generator_A2B"))
        )
        self.generator_B2A.load_state_dict(
            flow.load(os.path.join(PATH, "generator_B2A"))
        )
        self.discriminator_A.load_state_dict(
            flow.load(os.path.join(PATH, "discriminator_A"))
        )
        self.discriminator_B.load_state_dict(
            flow.load(os.path.join(PATH, "discriminator_B"))
        )
[ "oneflow.optim.Adam", "oneflow.tensor", "oneflow.mean", "oneflow.utils.data.DataLoader", "oneflow.cuda.is_available", "oneflow.abs", "oneflow.ones_like" ]
[((1021, 1074), 'os.path.join', 'os.path.join', (['args.origin_data_dir', 'args.speaker_A_id'], {}), '(args.origin_data_dir, args.speaker_A_id)\n', (1033, 1074), False, 'import os\n'), ((1103, 1156), 'os.path.join', 'os.path.join', (['args.output_data_dir', 'args.speaker_A_id'], {}), '(args.output_data_dir, args.speaker_A_id)\n', (1115, 1156), False, 'import os\n'), ((1189, 1242), 'os.path.join', 'os.path.join', (['args.origin_data_dir', 'args.speaker_B_id'], {}), '(args.origin_data_dir, args.speaker_B_id)\n', (1201, 1242), False, 'import os\n'), ((1271, 1324), 'os.path.join', 'os.path.join', (['args.output_data_dir', 'args.speaker_B_id'], {}), '(args.output_data_dir, args.speaker_B_id)\n', (1283, 1324), False, 'import os\n'), ((3313, 3435), 'utils.dataset.VCDataset', 'VCDataset', ([], {'datasetA': 'self.dataset_A', 'datasetB': 'self.dataset_B', 'n_frames': 'args.num_frames', 'max_mask_len': 'args.max_mask_len'}), '(datasetA=self.dataset_A, datasetB=self.dataset_B, n_frames=args.\n num_frames, max_mask_len=args.max_mask_len)\n', (3322, 3435), False, 'from utils.dataset import VCDataset\n'), ((3522, 3639), 'oneflow.utils.data.DataLoader', 'flow.utils.data.DataLoader', ([], {'dataset': 'self.dataset', 'batch_size': 'self.mini_batch_size', 'shuffle': '(True)', 'drop_last': '(False)'}), '(dataset=self.dataset, batch_size=self.\n mini_batch_size, shuffle=True, drop_last=False)\n', (3548, 3639), True, 'import oneflow as flow\n'), ((4666, 4733), 'oneflow.optim.Adam', 'flow.optim.Adam', (['g_params'], {'lr': 'self.generator_lr', 'betas': '(0.5, 0.999)'}), '(g_params, lr=self.generator_lr, betas=(0.5, 0.999))\n', (4681, 4733), True, 'import oneflow as flow\n'), ((4795, 4866), 'oneflow.optim.Adam', 'flow.optim.Adam', (['d_params'], {'lr': 'self.discriminator_lr', 'betas': '(0.5, 0.999)'}), '(d_params, lr=self.discriminator_lr, betas=(0.5, 0.999))\n', (4810, 4866), True, 'import oneflow as flow\n'), ((13141, 13164), 'os.listdir', 'os.listdir', (['infer_A_dir'], {}), '(infer_A_dir)\n', (13151, 13164), False, 'import os\n'), ((15843, 15883), 'os.makedirs', 'os.makedirs', (['output_A_dir'], {'exist_ok': '(True)'}), '(output_A_dir, exist_ok=True)\n', (15854, 15883), False, 'import os\n'), ((15961, 15989), 'os.listdir', 'os.listdir', (['validation_A_dir'], {}), '(validation_A_dir)\n', (15971, 15989), False, 'import os\n'), ((18674, 18714), 'os.makedirs', 'os.makedirs', (['output_B_dir'], {'exist_ok': '(True)'}), '(output_B_dir, exist_ok=True)\n', (18685, 18714), False, 'import os\n'), ((18792, 18820), 'os.listdir', 'os.listdir', (['validation_B_dir'], {}), '(validation_B_dir)\n', (18802, 18820), False, 'import os\n'), ((1526, 1631), 'os.path.join', 'os.path.join', (['args.preprocessed_data_dir', 'args.speaker_A_id', 'f"""{args.speaker_A_id}_normalized.pickle"""'], {}), "(args.preprocessed_data_dir, args.speaker_A_id,\n f'{args.speaker_A_id}_normalized.pickle')\n", (1538, 1631), False, 'import os\n'), ((1753, 1854), 'os.path.join', 'os.path.join', (['args.preprocessed_data_dir', 'args.speaker_A_id', 'f"""{args.speaker_A_id}_norm_stat.npz"""'], {}), "(args.preprocessed_data_dir, args.speaker_A_id,\n f'{args.speaker_A_id}_norm_stat.npz')\n", (1765, 1854), False, 'import os\n'), ((2139, 2244), 'os.path.join', 'os.path.join', (['args.preprocessed_data_dir', 'args.speaker_B_id', 'f"""{args.speaker_B_id}_normalized.pickle"""'], {}), "(args.preprocessed_data_dir, args.speaker_B_id,\n f'{args.speaker_B_id}_normalized.pickle')\n", (2151, 2244), False, 'import os\n'), ((2366, 2467), 'os.path.join', 'os.path.join', 
(['args.preprocessed_data_dir', 'args.speaker_B_id', 'f"""{args.speaker_B_id}_norm_stat.npz"""'], {}), "(args.preprocessed_data_dir, args.speaker_B_id,\n f'{args.speaker_B_id}_norm_stat.npz')\n", (2378, 2467), False, 'import os\n'), ((6098, 6112), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6109, 6112), False, 'import pickle\n'), ((13189, 13220), 'os.path.join', 'os.path.join', (['infer_A_dir', 'file'], {}), '(infer_A_dir, file)\n', (13201, 13220), False, 'import os\n'), ((13242, 13293), 'librosa.load', 'librosa.load', (['filePath'], {'sr': 'sampling_rate', 'mono': '(True)'}), '(filePath, sr=sampling_rate, mono=True)\n', (13254, 13293), False, 'import librosa\n'), ((13312, 13404), 'utils.data_utils.wav_padding', 'preprocess.wav_padding', ([], {'wav': 'wav', 'sr': 'sampling_rate', 'frame_period': 'frame_period', 'multiple': '(4)'}), '(wav=wav, sr=sampling_rate, frame_period=frame_period,\n multiple=4)\n', (13334, 13404), True, 'import utils.data_utils as preprocess\n'), ((13466, 13551), 'utils.data_utils.world_decompose', 'preprocess.world_decompose', ([], {'wav': 'wav', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(wav=wav, fs=sampling_rate, frame_period=frame_period\n )\n', (13492, 13551), True, 'import utils.data_utils as preprocess\n'), ((13604, 13784), 'utils.data_utils.pitch_conversion', 'preprocess.pitch_conversion', ([], {'f0': 'f0', 'mean_log_src': 'self.dataset_A_mean', 'std_log_src': 'self.dataset_A_std', 'mean_log_target': 'self.dataset_B_mean', 'std_log_target': 'self.dataset_B_std'}), '(f0=f0, mean_log_src=self.dataset_A_mean,\n std_log_src=self.dataset_A_std, mean_log_target=self.dataset_B_mean,\n std_log_target=self.dataset_B_std)\n', (13631, 13784), True, 'import utils.data_utils as preprocess\n'), ((13895, 13974), 'utils.data_utils.world_encode_spectral_envelop', 'preprocess.world_encode_spectral_envelop', ([], {'sp': 'sp', 'fs': 'sampling_rate', 'dim': 'num_mcep'}), '(sp=sp, fs=sampling_rate, dim=num_mcep)\n', (13935, 13974), True, 'import utils.data_utils as preprocess\n'), ((14201, 14226), 'numpy.array', 'np.array', (['[coded_sp_norm]'], {}), '([coded_sp_norm])\n', (14209, 14226), True, 'import numpy as np\n'), ((14243, 14267), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (14265, 14267), True, 'import oneflow as flow\n'), ((14685, 14720), 'numpy.squeeze', 'np.squeeze', (['coded_sp_converted_norm'], {}), '(coded_sp_converted_norm)\n', (14695, 14720), True, 'import numpy as np\n'), ((15064, 15156), 'utils.data_utils.world_decode_spectral_envelop', 'preprocess.world_decode_spectral_envelop', ([], {'coded_sp': 'coded_sp_converted', 'fs': 'sampling_rate'}), '(coded_sp=coded_sp_converted, fs=\n sampling_rate)\n', (15104, 15156), True, 'import utils.data_utils as preprocess\n'), ((15213, 15356), 'utils.data_utils.world_speech_synthesis', 'preprocess.world_speech_synthesis', ([], {'f0': 'f0_converted[0]', 'decoded_sp': 'decoded_sp_converted', 'ap': 'ap', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(f0=f0_converted[0], decoded_sp=\n decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period)\n', (15246, 15356), True, 'import utils.data_utils as preprocess\n'), ((16014, 16050), 'os.path.join', 'os.path.join', (['validation_A_dir', 'file'], {}), '(validation_A_dir, file)\n', (16026, 16050), False, 'import os\n'), ((16072, 16123), 'librosa.load', 'librosa.load', (['filePath'], {'sr': 'sampling_rate', 'mono': '(True)'}), '(filePath, sr=sampling_rate, mono=True)\n', (16084, 16123), False, 'import 
librosa\n'), ((16142, 16234), 'utils.data_utils.wav_padding', 'preprocess.wav_padding', ([], {'wav': 'wav', 'sr': 'sampling_rate', 'frame_period': 'frame_period', 'multiple': '(4)'}), '(wav=wav, sr=sampling_rate, frame_period=frame_period,\n multiple=4)\n', (16164, 16234), True, 'import utils.data_utils as preprocess\n'), ((16296, 16381), 'utils.data_utils.world_decompose', 'preprocess.world_decompose', ([], {'wav': 'wav', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(wav=wav, fs=sampling_rate, frame_period=frame_period\n )\n', (16322, 16381), True, 'import utils.data_utils as preprocess\n'), ((16434, 16614), 'utils.data_utils.pitch_conversion', 'preprocess.pitch_conversion', ([], {'f0': 'f0', 'mean_log_src': 'self.dataset_A_mean', 'std_log_src': 'self.dataset_A_std', 'mean_log_target': 'self.dataset_B_mean', 'std_log_target': 'self.dataset_B_std'}), '(f0=f0, mean_log_src=self.dataset_A_mean,\n std_log_src=self.dataset_A_std, mean_log_target=self.dataset_B_mean,\n std_log_target=self.dataset_B_std)\n', (16461, 16614), True, 'import utils.data_utils as preprocess\n'), ((16725, 16804), 'utils.data_utils.world_encode_spectral_envelop', 'preprocess.world_encode_spectral_envelop', ([], {'sp': 'sp', 'fs': 'sampling_rate', 'dim': 'num_mcep'}), '(sp=sp, fs=sampling_rate, dim=num_mcep)\n', (16765, 16804), True, 'import utils.data_utils as preprocess\n'), ((17031, 17056), 'numpy.array', 'np.array', (['[coded_sp_norm]'], {}), '([coded_sp_norm])\n', (17039, 17056), True, 'import numpy as np\n'), ((17073, 17097), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (17095, 17097), True, 'import oneflow as flow\n'), ((17515, 17550), 'numpy.squeeze', 'np.squeeze', (['coded_sp_converted_norm'], {}), '(coded_sp_converted_norm)\n', (17525, 17550), True, 'import numpy as np\n'), ((17894, 17986), 'utils.data_utils.world_decode_spectral_envelop', 'preprocess.world_decode_spectral_envelop', ([], {'coded_sp': 'coded_sp_converted', 'fs': 'sampling_rate'}), '(coded_sp=coded_sp_converted, fs=\n sampling_rate)\n', (17934, 17986), True, 'import utils.data_utils as preprocess\n'), ((18043, 18186), 'utils.data_utils.world_speech_synthesis', 'preprocess.world_speech_synthesis', ([], {'f0': 'f0_converted[0]', 'decoded_sp': 'decoded_sp_converted', 'ap': 'ap', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(f0=f0_converted[0], decoded_sp=\n decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period)\n', (18076, 18186), True, 'import utils.data_utils as preprocess\n'), ((18845, 18881), 'os.path.join', 'os.path.join', (['validation_B_dir', 'file'], {}), '(validation_B_dir, file)\n', (18857, 18881), False, 'import os\n'), ((18903, 18954), 'librosa.load', 'librosa.load', (['filePath'], {'sr': 'sampling_rate', 'mono': '(True)'}), '(filePath, sr=sampling_rate, mono=True)\n', (18915, 18954), False, 'import librosa\n'), ((18973, 19065), 'utils.data_utils.wav_padding', 'preprocess.wav_padding', ([], {'wav': 'wav', 'sr': 'sampling_rate', 'frame_period': 'frame_period', 'multiple': '(4)'}), '(wav=wav, sr=sampling_rate, frame_period=frame_period,\n multiple=4)\n', (18995, 19065), True, 'import utils.data_utils as preprocess\n'), ((19127, 19212), 'utils.data_utils.world_decompose', 'preprocess.world_decompose', ([], {'wav': 'wav', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(wav=wav, fs=sampling_rate, frame_period=frame_period\n )\n', (19153, 19212), True, 'import utils.data_utils as preprocess\n'), ((19265, 19445), 'utils.data_utils.pitch_conversion', 
'preprocess.pitch_conversion', ([], {'f0': 'f0', 'mean_log_src': 'self.dataset_B_mean', 'std_log_src': 'self.dataset_B_std', 'mean_log_target': 'self.dataset_A_mean', 'std_log_target': 'self.dataset_A_std'}), '(f0=f0, mean_log_src=self.dataset_B_mean,\n std_log_src=self.dataset_B_std, mean_log_target=self.dataset_A_mean,\n std_log_target=self.dataset_A_std)\n', (19292, 19445), True, 'import utils.data_utils as preprocess\n'), ((19556, 19635), 'utils.data_utils.world_encode_spectral_envelop', 'preprocess.world_encode_spectral_envelop', ([], {'sp': 'sp', 'fs': 'sampling_rate', 'dim': 'num_mcep'}), '(sp=sp, fs=sampling_rate, dim=num_mcep)\n', (19596, 19635), True, 'import utils.data_utils as preprocess\n'), ((19862, 19887), 'numpy.array', 'np.array', (['[coded_sp_norm]'], {}), '([coded_sp_norm])\n', (19870, 19887), True, 'import numpy as np\n'), ((19904, 19928), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (19926, 19928), True, 'import oneflow as flow\n'), ((20346, 20381), 'numpy.squeeze', 'np.squeeze', (['coded_sp_converted_norm'], {}), '(coded_sp_converted_norm)\n', (20356, 20381), True, 'import numpy as np\n'), ((20725, 20817), 'utils.data_utils.world_decode_spectral_envelop', 'preprocess.world_decode_spectral_envelop', ([], {'coded_sp': 'coded_sp_converted', 'fs': 'sampling_rate'}), '(coded_sp=coded_sp_converted, fs=\n sampling_rate)\n', (20765, 20817), True, 'import utils.data_utils as preprocess\n'), ((20874, 21017), 'utils.data_utils.world_speech_synthesis', 'preprocess.world_speech_synthesis', ([], {'f0': 'f0_converted[0]', 'decoded_sp': 'decoded_sp_converted', 'ap': 'ap', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(f0=f0_converted[0], decoded_sp=\n decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period)\n', (20907, 21017), True, 'import utils.data_utils as preprocess\n'), ((21415, 21461), 'os.path.join', 'os.path.join', (['PATH', "('generator_A2B_%d' % epoch)"], {}), "(PATH, 'generator_A2B_%d' % epoch)\n", (21427, 21461), False, 'import os\n'), ((21549, 21595), 'os.path.join', 'os.path.join', (['PATH', "('generator_B2A_%d' % epoch)"], {}), "(PATH, 'generator_B2A_%d' % epoch)\n", (21561, 21595), False, 'import os\n'), ((21685, 21733), 'os.path.join', 'os.path.join', (['PATH', "('discriminator_A_%d' % epoch)"], {}), "(PATH, 'discriminator_A_%d' % epoch)\n", (21697, 21733), False, 'import os\n'), ((21823, 21871), 'os.path.join', 'os.path.join', (['PATH', "('discriminator_B_%d' % epoch)"], {}), "(PATH, 'discriminator_B_%d' % epoch)\n", (21835, 21871), False, 'import os\n'), ((3775, 3786), 'model.model.Generator', 'Generator', ([], {}), '()\n', (3784, 3786), False, 'from model.model import Generator, Discriminator\n'), ((3832, 3843), 'model.model.Generator', 'Generator', ([], {}), '()\n', (3841, 3843), False, 'from model.model import Generator, Discriminator\n'), ((3891, 3906), 'model.model.Discriminator', 'Discriminator', ([], {}), '()\n', (3904, 3906), False, 'from model.model import Generator, Discriminator\n'), ((3954, 3969), 'model.model.Discriminator', 'Discriminator', ([], {}), '()\n', (3967, 3969), False, 'from model.model import Generator, Discriminator\n'), ((4077, 4092), 'model.model.Discriminator', 'Discriminator', ([], {}), '()\n', (4090, 4092), False, 'from model.model import Generator, Discriminator\n'), ((4200, 4215), 'model.model.Discriminator', 'Discriminator', ([], {}), '()\n', (4213, 4215), False, 'from model.model import Generator, Discriminator\n'), ((8534, 8564), 'oneflow.mean', 'flow.mean', (['((1 - 
d_fake_B) ** 2)'], {}), '((1 - d_fake_B) ** 2)\n', (8543, 8564), True, 'import oneflow as flow\n'), ((8594, 8624), 'oneflow.mean', 'flow.mean', (['((1 - d_fake_A) ** 2)'], {}), '((1 - d_fake_A) ** 2)\n', (8603, 8624), True, 'import oneflow as flow\n'), ((8721, 8757), 'oneflow.mean', 'flow.mean', (['((1 - d_fake_cycle_B) ** 2)'], {}), '((1 - d_fake_cycle_B) ** 2)\n', (8730, 8757), True, 'import oneflow as flow\n'), ((8799, 8835), 'oneflow.mean', 'flow.mean', (['((1 - d_fake_cycle_A) ** 2)'], {}), '((1 - d_fake_cycle_A) ** 2)\n', (8808, 8835), True, 'import oneflow as flow\n'), ((10657, 10687), 'oneflow.mean', 'flow.mean', (['((1 - d_real_A) ** 2)'], {}), '((1 - d_real_A) ** 2)\n', (10666, 10687), True, 'import oneflow as flow\n'), ((10720, 10750), 'oneflow.mean', 'flow.mean', (['((0 - d_fake_A) ** 2)'], {}), '((0 - d_fake_A) ** 2)\n', (10729, 10750), True, 'import oneflow as flow\n'), ((10849, 10879), 'oneflow.mean', 'flow.mean', (['((1 - d_real_B) ** 2)'], {}), '((1 - d_real_B) ** 2)\n', (10858, 10879), True, 'import oneflow as flow\n'), ((10912, 10942), 'oneflow.mean', 'flow.mean', (['((0 - d_fake_B) ** 2)'], {}), '((0 - d_fake_B) ** 2)\n', (10921, 10942), True, 'import oneflow as flow\n'), ((11087, 11119), 'oneflow.mean', 'flow.mean', (['((0 - d_cycled_A) ** 2)'], {}), '((0 - d_cycled_A) ** 2)\n', (11096, 11119), True, 'import oneflow as flow\n'), ((11154, 11186), 'oneflow.mean', 'flow.mean', (['((0 - d_cycled_B) ** 2)'], {}), '((0 - d_cycled_B) ** 2)\n', (11163, 11186), True, 'import oneflow as flow\n'), ((11220, 11251), 'oneflow.mean', 'flow.mean', (['((1 - d_real_A2) ** 2)'], {}), '((1 - d_real_A2) ** 2)\n', (11229, 11251), True, 'import oneflow as flow\n'), ((11285, 11316), 'oneflow.mean', 'flow.mean', (['((1 - d_real_B2) ** 2)'], {}), '((1 - d_real_B2) ** 2)\n', (11294, 11316), True, 'import oneflow as flow\n'), ((14518, 14547), 'oneflow.ones_like', 'flow.ones_like', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (14532, 14547), True, 'import oneflow as flow\n'), ((17348, 17377), 'oneflow.ones_like', 'flow.ones_like', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (17362, 17377), True, 'import oneflow as flow\n'), ((20179, 20208), 'oneflow.ones_like', 'flow.ones_like', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (20193, 20208), True, 'import oneflow as flow\n'), ((21981, 22016), 'os.path.join', 'os.path.join', (['PATH', '"""generator_A2B"""'], {}), "(PATH, 'generator_A2B')\n", (21993, 22016), False, 'import os\n'), ((22094, 22129), 'os.path.join', 'os.path.join', (['PATH', '"""generator_B2A"""'], {}), "(PATH, 'generator_B2A')\n", (22106, 22129), False, 'import os\n'), ((22209, 22246), 'os.path.join', 'os.path.join', (['PATH', '"""discriminator_A"""'], {}), "(PATH, 'discriminator_A')\n", (22221, 22246), False, 'import os\n'), ((22326, 22363), 'os.path.join', 'os.path.join', (['PATH', '"""discriminator_B"""'], {}), "(PATH, 'discriminator_B')\n", (22338, 22363), False, 'import os\n'), ((7481, 7503), 'oneflow.ones_like', 'flow.ones_like', (['fake_B'], {}), '(fake_B)\n', (7495, 7503), True, 'import oneflow as flow\n'), ((7618, 7640), 'oneflow.ones_like', 'flow.ones_like', (['fake_A'], {}), '(fake_A)\n', (7632, 7640), True, 'import oneflow as flow\n'), ((7698, 7720), 'oneflow.ones_like', 'flow.ones_like', (['real_A'], {}), '(real_A)\n', (7712, 7720), True, 'import oneflow as flow\n'), ((7778, 7800), 'oneflow.ones_like', 'flow.ones_like', (['real_B'], {}), '(real_B)\n', (7792, 7800), True, 'import oneflow as flow\n'), ((10171, 10198), 'oneflow.ones_like', 'flow.ones_like', 
(['generated_A'], {}), '(generated_A)\n', (10185, 10198), True, 'import oneflow as flow\n'), ((10501, 10528), 'oneflow.ones_like', 'flow.ones_like', (['generated_B'], {}), '(generated_B)\n', (10515, 10528), True, 'import oneflow as flow\n'), ((14940, 14980), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coded_sp_converted'], {}), '(coded_sp_converted)\n', (14960, 14980), True, 'import numpy as np\n'), ((17770, 17810), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coded_sp_converted'], {}), '(coded_sp_converted)\n', (17790, 17810), True, 'import numpy as np\n'), ((20601, 20641), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coded_sp_converted'], {}), '(coded_sp_converted)\n', (20621, 20641), True, 'import numpy as np\n'), ((8169, 8195), 'oneflow.abs', 'flow.abs', (['(real_A - cycle_A)'], {}), '(real_A - cycle_A)\n', (8177, 8195), True, 'import oneflow as flow\n'), ((8230, 8256), 'oneflow.abs', 'flow.abs', (['(real_B - cycle_B)'], {}), '(real_B - cycle_B)\n', (8238, 8256), True, 'import oneflow as flow\n'), ((8359, 8388), 'oneflow.abs', 'flow.abs', (['(real_A - identity_A)'], {}), '(real_A - identity_A)\n', (8367, 8388), True, 'import oneflow as flow\n'), ((8423, 8452), 'oneflow.abs', 'flow.abs', (['(real_B - identity_B)'], {}), '(real_B - identity_B)\n', (8431, 8452), True, 'import oneflow as flow\n'), ((14393, 14419), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (14404, 14419), True, 'import oneflow as flow\n'), ((15525, 15547), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (15541, 15547), False, 'import os\n'), ((17223, 17249), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (17234, 17249), True, 'import oneflow as flow\n'), ((18356, 18378), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (18372, 18378), False, 'import os\n'), ((20054, 20080), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (20065, 20080), True, 'import oneflow as flow\n'), ((21187, 21209), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (21203, 21209), False, 'import os\n'), ((14301, 14327), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (14312, 14327), True, 'import oneflow as flow\n'), ((17131, 17157), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (17142, 17157), True, 'import oneflow as flow\n'), ((19962, 19988), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (19973, 19988), True, 'import oneflow as flow\n')]
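The trainer's only construction input is an argparse-style namespace, so a driver can be very small. The sketch below is hypothetical: every field mirrors an attribute read in __init__, while the paths, speaker ids, and hyperparameter values are illustrative placeholders.

# Hypothetical driver for MaskCycleGANVCTrainer; field values are illustrative.
from argparse import Namespace

args = Namespace(
    num_epochs=1000, start_epoch=1,
    generator_lr=2e-4, discriminator_lr=1e-4, decay_after=1e4,
    batch_size=1, cycle_loss_lambda=10, identity_loss_lambda=5,
    device="cuda", epochs_per_save=50, sample_rate=22050,
    origin_data_dir="data/raw", output_data_dir="data/out",
    preprocessed_data_dir="data/preprocessed",
    speaker_A_id="SF1", speaker_B_id="TF2",
    infer_data_dir="data/infer", pretrain_models="model_checkpoint",
    num_frames=64, max_mask_len=25,
)
trainer = MaskCycleGANVCTrainer(args)
trainer.train()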
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import uuid from typing import Callable, Optional, Union import oneflow.python.framework.parallel_conf_util as parallel_conf_util import oneflow.core.operator.op_conf_pb2 as op_conf_util import oneflow.python.framework.c_api_util as c_api_util import oneflow.python.framework.session_context as session_ctx import oneflow.python.framework.compile_context as compile_context import oneflow.python.framework.id_util as id_util import oneflow.python.framework.local_blob as local_blob_util import oneflow.python.framework.remote_blob as remote_blob_util import oneflow.python.framework.watcher as watcher_util import oneflow.python.framework.typing as oft import oneflow.python.framework.typing_util as oft_util import oneflow.python.lib.core.enable_if as enable_if import oneflow.python.framework.hob as hob from oneflow.core.job.lbi_diff_watcher_info_pb2 import LbiAndDiffWatcherUuidPair from oneflow.python.framework.remote_blob import ConsistentBlob, MirroredBlob from oneflow.python.oneflow_export import oneflow_export import oneflow.python.eager as eager_util import oneflow import inspect @oneflow_export("watch") def Watch( blob_watched: remote_blob_util.BlobDef, handler_or_prompt: Optional[Union[Callable, str]] = None, ) -> None: r"""Register callback for a blob. The callback will be called after the computation produce the blob finishes. 
Args: blob_watched: a `Blob` handler_or_prompt: a function has an argument of a `Blob` """ api = enable_if.unique([EagerWatch, LazyWatch]) return api(blob_watched, handler_or_prompt) @enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled) def EagerWatch(blob_watched, handler_or_prompt=None): handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt) local_blob = local_blob_util.MakeLocalBlob4EagerBlob(blob_watched) handler(oft_util.TransformWatchedBlob(local_blob, handler)) @enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled) def LazyWatch(blob_watched, handler_or_prompt=None): handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt) if isinstance(blob_watched, ConsistentBlob): LazyConsistentWatch(blob_watched, handler) elif isinstance(blob_watched, MirroredBlob): handlers = _MakeSubConsistentBlobHandlers(blob_watched, handler) for consistent_blob, sub_handler in zip( blob_watched.sub_consistent_blob_list, handlers ): assert isinstance(consistent_blob, ConsistentBlob) LazyConsistentWatch(consistent_blob, sub_handler) else: raise NotImplementedError def LazyConsistentWatch(blob_watched, handler): handler_uuid = str(uuid.uuid1()) op_conf = op_conf_util.OperatorConf() op_conf.name = id_util.UniqueStr("ForeignWatch_") setattr(op_conf.foreign_watch_conf, "in", blob_watched.unique_name) op_conf.foreign_watch_conf.handler_uuid = handler_uuid device_name = blob_watched.parallel_conf.device_name[0] tag_and_dev_ids = parallel_conf_util.GetDeviceTagAndMachineDeviceIds( blob_watched.parallel_conf ) with oneflow.scope.placement(*tag_and_dev_ids): compile_context.CurJobAddOp(op_conf) watcher_util.BindUuidAndHandler(handler_uuid, blob_watched, handler) @oneflow_export("watch_diff") def WatchDiff( blob_watched: remote_blob_util.BlobDef, handler_or_prompt: Optional[Union[Callable, str]] = None, ) -> None: r"""Register callback for gradient of a blob. The callback will be called after the computation produce the gradient blob finishes. 
Args: blob_watched: a `Blob` handler_or_prompt: a function has an argument of a `Blob` """ api = enable_if.unique([EagerWatchDiff, LazyWatchDiff]) return api(blob_watched, handler_or_prompt) @enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled) def EagerWatchDiff(blob_watched, handler_or_prompt=None): handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt) handler_uuid = str(uuid.uuid1()) lbi_and_uuid = LbiAndDiffWatcherUuidPair() lbi_and_uuid.lbi.CopyFrom(blob_watched.lbi) lbi_and_uuid.watcher_uuid = handler_uuid c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair(lbi_and_uuid) uuid2watch_handler = session_ctx.GetDefaultSession().uuid2watch_handler uuid2watch_handler[handler_uuid] = lambda x: EagerWatch(x, handler_or_prompt) @enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled) def LazyWatchDiff(blob_watched, handler_or_prompt=None): handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt) if isinstance(blob_watched, ConsistentBlob): LazyConsistentWatchDiff(blob_watched, handler) elif isinstance(blob_watched, MirroredBlob): handlers = _MakeSubConsistentBlobHandlers(blob_watched, handler) for consistent_blob, sub_handler in zip( blob_watched.sub_consistent_blob_list, handlers ): assert isinstance(consistent_blob, ConsistentBlob) LazyConsistentWatchDiff(consistent_blob, sub_handler) else: raise NotImplementedError def LazyConsistentWatchDiff(blob_watched, handler): handler_uuid = str(uuid.uuid1()) lbi_and_uuid = LbiAndDiffWatcherUuidPair() lbi_and_uuid.lbi.CopyFrom(blob_watched.lbi) lbi_and_uuid.watcher_uuid = handler_uuid c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair(lbi_and_uuid) watcher_util.BindUuidAndHandler(handler_uuid, blob_watched, handler) def _CheckOrMakeHandler(blob_watched, handler_or_prompt): if callable(handler_or_prompt): parameters = inspect.signature(handler_or_prompt).parameters oft_util.CheckWatchCallbackParameterAnnotation(parameters) annotation = parameters[list(parameters.keys())[0]].annotation oft_util.CheckWatchedBlobByAnnotation(blob_watched, annotation) return handler_or_prompt prompt = handler_or_prompt def Handler(x: GetTypeAnnotation(blob_watched)): if prompt is not None: print(str(prompt)) print(x) return Handler def _MakeSubConsistentBlobHandlers(blob_watched, handler): assert isinstance(blob_watched, MirroredBlob) handler4parallel_id_and_local_blob = _MakeHandler4ParallelIdAndLocalBlob( blob_watched, handler ) return [ _WrapperHandler4ParallelIdAndLocalBlob(i, handler4parallel_id_and_local_blob) for i in range(len(blob_watched.sub_consistent_blob_list)) ] def _WrapperHandler4ParallelIdAndLocalBlob( parallel_id, handler4parallel_id_and_local_blob ): return lambda local_blob: handler4parallel_id_and_local_blob( parallel_id, local_blob ) def _MakeHandler4ParallelIdAndLocalBlob(blob_watched, handler): parallel_id2consistent_local_blob = {} len_sub_remote_blobs = len(blob_watched.sub_consistent_blob_list) def HandlerParallelIdAndLocalBlob(parallel_id, local_blob): assert parallel_id not in parallel_id2consistent_local_blob parallel_id2consistent_local_blob[parallel_id] = local_blob if len(parallel_id2consistent_local_blob) != len_sub_remote_blobs: return local_blob_list = [ parallel_id2consistent_local_blob[parallel_id] for i in range(len_sub_remote_blobs) ] local_blob = local_blob_util.MergeLocalBlobs(local_blob_list, blob_watched) handler(oft_util.TransformWatchedBlob(local_blob, handler)) return HandlerParallelIdAndLocalBlob def GetTypeAnnotation(blob_watched): if not 
blob_watched.is_dynamic: return oft.Numpy elif not blob_watched.is_tensor: return oft.ListNumpy else: return oft.ListListNumpy
[ "oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair", "oneflow.python.lib.core.enable_if.condition", "oneflow.python.framework.typing_util.TransformWatchedBlob", "oneflow.python.framework.parallel_conf_util.GetDeviceTagAndMachineDeviceIds", "oneflow.core.operator.op_conf_pb2.OperatorConf", "oneflow.python.framework.watcher.BindUuidAndHandler", "oneflow.python.lib.core.enable_if.unique", "oneflow.python.framework.compile_context.CurJobAddOp", "oneflow.python.framework.local_blob.MakeLocalBlob4EagerBlob", "oneflow.scope.placement", "oneflow.python.framework.typing_util.CheckWatchCallbackParameterAnnotation", "oneflow.python.framework.typing_util.CheckWatchedBlobByAnnotation", "oneflow.core.job.lbi_diff_watcher_info_pb2.LbiAndDiffWatcherUuidPair", "oneflow.python.framework.session_context.GetDefaultSession", "oneflow.python.framework.id_util.UniqueStr", "oneflow.python.framework.local_blob.MergeLocalBlobs", "oneflow.python.oneflow_export.oneflow_export" ]
[((1728, 1751), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""watch"""'], {}), "('watch')\n", (1742, 1751), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2214, 2283), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (2233, 2283), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2543, 2613), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (2562, 2613), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((3907, 3935), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""watch_diff"""'], {}), "('watch_diff')\n", (3921, 3935), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4431, 4500), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (4450, 4500), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((5045, 5115), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (5064, 5115), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2121, 2162), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[EagerWatch, LazyWatch]'], {}), '([EagerWatch, LazyWatch])\n', (2137, 2162), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2422, 2475), 'oneflow.python.framework.local_blob.MakeLocalBlob4EagerBlob', 'local_blob_util.MakeLocalBlob4EagerBlob', (['blob_watched'], {}), '(blob_watched)\n', (2461, 2475), True, 'import oneflow.python.framework.local_blob as local_blob_util\n'), ((3346, 3373), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (3371, 3373), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((3393, 3427), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ForeignWatch_"""'], {}), "('ForeignWatch_')\n", (3410, 3427), True, 'import oneflow.python.framework.id_util as id_util\n'), ((3641, 3719), 'oneflow.python.framework.parallel_conf_util.GetDeviceTagAndMachineDeviceIds', 'parallel_conf_util.GetDeviceTagAndMachineDeviceIds', (['blob_watched.parallel_conf'], {}), '(blob_watched.parallel_conf)\n', (3691, 3719), True, 'import oneflow.python.framework.parallel_conf_util as parallel_conf_util\n'), ((3835, 3903), 'oneflow.python.framework.watcher.BindUuidAndHandler', 'watcher_util.BindUuidAndHandler', (['handler_uuid', 'blob_watched', 'handler'], {}), '(handler_uuid, blob_watched, handler)\n', (3866, 3903), True, 'import oneflow.python.framework.watcher as watcher_util\n'), ((4330, 4379), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[EagerWatchDiff, LazyWatchDiff]'], {}), '([EagerWatchDiff, LazyWatchDiff])\n', (4346, 4379), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((4682, 4709), 'oneflow.core.job.lbi_diff_watcher_info_pb2.LbiAndDiffWatcherUuidPair', 'LbiAndDiffWatcherUuidPair', ([], {}), '()\n', (4707, 4709), False, 'from oneflow.core.job.lbi_diff_watcher_info_pb2 import LbiAndDiffWatcherUuidPair\n'), ((4807, 4883), 
'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', 'c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', (['lbi_and_uuid'], {}), '(lbi_and_uuid)\n', (4869, 4883), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((5869, 5896), 'oneflow.core.job.lbi_diff_watcher_info_pb2.LbiAndDiffWatcherUuidPair', 'LbiAndDiffWatcherUuidPair', ([], {}), '()\n', (5894, 5896), False, 'from oneflow.core.job.lbi_diff_watcher_info_pb2 import LbiAndDiffWatcherUuidPair\n'), ((5994, 6070), 'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', 'c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', (['lbi_and_uuid'], {}), '(lbi_and_uuid)\n', (6056, 6070), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((6075, 6143), 'oneflow.python.framework.watcher.BindUuidAndHandler', 'watcher_util.BindUuidAndHandler', (['handler_uuid', 'blob_watched', 'handler'], {}), '(handler_uuid, blob_watched, handler)\n', (6106, 6143), True, 'import oneflow.python.framework.watcher as watcher_util\n'), ((2488, 2538), 'oneflow.python.framework.typing_util.TransformWatchedBlob', 'oft_util.TransformWatchedBlob', (['local_blob', 'handler'], {}), '(local_blob, handler)\n', (2517, 2538), True, 'import oneflow.python.framework.typing_util as oft_util\n'), ((3318, 3330), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (3328, 3330), False, 'import uuid\n'), ((3743, 3784), 'oneflow.scope.placement', 'oneflow.scope.placement', (['*tag_and_dev_ids'], {}), '(*tag_and_dev_ids)\n', (3766, 3784), False, 'import oneflow\n'), ((3794, 3830), 'oneflow.python.framework.compile_context.CurJobAddOp', 'compile_context.CurJobAddOp', (['op_conf'], {}), '(op_conf)\n', (3821, 3830), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((4649, 4661), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4659, 4661), False, 'import uuid\n'), ((4909, 4940), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4938, 4940), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((5836, 5848), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (5846, 5848), False, 'import uuid\n'), ((6317, 6375), 'oneflow.python.framework.typing_util.CheckWatchCallbackParameterAnnotation', 'oft_util.CheckWatchCallbackParameterAnnotation', (['parameters'], {}), '(parameters)\n', (6363, 6375), True, 'import oneflow.python.framework.typing_util as oft_util\n'), ((6455, 6518), 'oneflow.python.framework.typing_util.CheckWatchedBlobByAnnotation', 'oft_util.CheckWatchedBlobByAnnotation', (['blob_watched', 'annotation'], {}), '(blob_watched, annotation)\n', (6492, 6518), True, 'import oneflow.python.framework.typing_util as oft_util\n'), ((7979, 8041), 'oneflow.python.framework.local_blob.MergeLocalBlobs', 'local_blob_util.MergeLocalBlobs', (['local_blob_list', 'blob_watched'], {}), '(local_blob_list, blob_watched)\n', (8010, 8041), True, 'import oneflow.python.framework.local_blob as local_blob_util\n'), ((6261, 6297), 'inspect.signature', 'inspect.signature', (['handler_or_prompt'], {}), '(handler_or_prompt)\n', (6278, 6297), False, 'import inspect\n'), ((8058, 8108), 'oneflow.python.framework.typing_util.TransformWatchedBlob', 'oft_util.TransformWatchedBlob', (['local_blob', 'handler'], {}), '(local_blob, handler)\n', (8087, 8108), True, 'import oneflow.python.framework.typing_util as oft_util\n')]
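# ---------------------------------------------------------------------------
# A minimal usage sketch for the `watch` / `watch_diff` APIs exported above.
# Everything here (the job name, the input shape, the relu op, the use of
# `oneflow.typing` placeholders in lazy/global mode) is an illustrative
# assumption, not part of the original file.
import numpy as np
import oneflow as flow
import oneflow.typing as oft


def _print_mean(x: oft.Numpy):
    # Handler form: called with the watched blob as a numpy-backed local blob.
    print("mean:", x.mean())


@flow.global_function()
def watch_job(images: oft.Numpy.Placeholder((4, 32))) -> oft.Numpy:
    out = flow.math.relu(images)
    flow.watch(out, _print_mean)  # callback form
    flow.watch(out, "relu out")   # prompt form: prints the prompt, then the blob
    return out


watch_job(np.random.randn(4, 32).astype(np.float32))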
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np import oneflow.experimental as flow from test_util import GenArgList def _np_bcewithlogitsloss( np_input, np_target, np_weight=None, np_pos_weight=None, reduction="none" ): _neg_input = np.negative(np_input) _max_val = np.clip(_neg_input, 0, None) _neg_max_val = np.negative(_max_val) if np_pos_weight is not None: _log_weight = ((np_pos_weight - 1) * np_target) + 1 _loss = (1 - np_target) * np_input + _log_weight * ( np.log(np.exp(_neg_max_val) + np.exp(_neg_input - _max_val)) + _max_val ) else: _loss = (1 - np_target) * np_input + _max_val _loss += np.log(np.exp(_neg_max_val) + np.exp(_neg_input - _max_val)) if np_weight is not None: assert ( np_weight.shape == np_input.shape ), "The weight shape must be the same as Input shape" _weighted_loss = np_weight * _loss else: _weighted_loss = _loss if reduction == "mean": return _weighted_loss.mean() elif reduction == "sum": return _weighted_loss.sum() else: return _weighted_loss def _np_bcewithlogitsloss_grad(np_input, np_target, np_weight, np_pos_weight): # Use numpy to compute grad elemcnt = np_target.size np_bce_with_logits_grad_mean = -(np_weight / elemcnt) * ( (np_target - 1) + ((1 - np_pos_weight) * np_target - 1) * (-np.exp(-np_input) / (1 + np.exp(-np_input))) ) np_bce_with_logits_grad_sum = np_bce_with_logits_grad_mean * elemcnt return { "mean": np_bce_with_logits_grad_mean, "sum": np_bce_with_logits_grad_sum, "none": np_bce_with_logits_grad_sum, } def _test_bcewithlogitsloss_impl(test_case, device, shape, reduction): x = np.random.randn(*shape).astype(np.float32) y = np.random.randint(0, 2, [*shape]).astype(np.float32) w = np.random.randn(*shape).astype(np.float32) pw = np.random.randn([*shape][-1]).astype(np.float32) input = flow.Tensor( x, dtype=flow.float32, requires_grad=True, device=flow.device(device) ) target = flow.Tensor(y, dtype=flow.float32, device=flow.device(device)) weight = flow.Tensor(w, dtype=flow.float32, device=flow.device(device)) pos_weight = flow.Tensor(pw, dtype=flow.float32, device=flow.device(device)) bcewithlogits_loss = flow.nn.BCEWithLogitsLoss( weight=weight, pos_weight=pos_weight, reduction=reduction ) of_out = bcewithlogits_loss(input, target) np_out = _np_bcewithlogitsloss( x, y, np_weight=w, np_pos_weight=pw, reduction=reduction ) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4)) # Backward test with np: of_out = of_out.sum() of_out.backward() np_grad = _np_bcewithlogitsloss_grad(x, y, np_weight=w, np_pos_weight=pw,)[ reduction ] test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-4, 1e-4)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestBCEWithLogitsLossModule(flow.unittest.TestCase): def test_bcewithlogitsloss(test_case): arg_dict = OrderedDict() arg_dict["test_fun"] = [ _test_bcewithlogitsloss_impl, ] arg_dict["device"] = ["cpu", "cuda"] arg_dict["shape"] = [ 
(3, 5), (10, 9, 21), (14, 22, 9, 21), (3, 2, 4, 16, 5), (1,), ] arg_dict["reduction"] = ["none", "sum", "mean"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) if __name__ == "__main__": unittest.main()
[ "oneflow.experimental.nn.BCEWithLogitsLoss", "oneflow.experimental.unittest.env.eager_execution_enabled", "oneflow.experimental.device" ]
[((859, 880), 'numpy.negative', 'np.negative', (['np_input'], {}), '(np_input)\n', (870, 880), True, 'import numpy as np\n'), ((896, 924), 'numpy.clip', 'np.clip', (['_neg_input', '(0)', 'None'], {}), '(_neg_input, 0, None)\n', (903, 924), True, 'import numpy as np\n'), ((944, 965), 'numpy.negative', 'np.negative', (['_max_val'], {}), '(_max_val)\n', (955, 965), True, 'import numpy as np\n'), ((3000, 3089), 'oneflow.experimental.nn.BCEWithLogitsLoss', 'flow.nn.BCEWithLogitsLoss', ([], {'weight': 'weight', 'pos_weight': 'pos_weight', 'reduction': 'reduction'}), '(weight=weight, pos_weight=pos_weight, reduction=\n reduction)\n', (3025, 3089), True, 'import oneflow.experimental as flow\n'), ((4303, 4318), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4316, 4318), False, 'import unittest\n'), ((3826, 3839), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3837, 3839), False, 'from collections import OrderedDict\n'), ((4208, 4228), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4218, 4228), False, 'from test_util import GenArgList\n'), ((3616, 3659), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3657, 3659), True, 'import oneflow.experimental as flow\n'), ((2418, 2441), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2433, 2441), True, 'import numpy as np\n'), ((2469, 2502), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '[*shape]'], {}), '(0, 2, [*shape])\n', (2486, 2502), True, 'import numpy as np\n'), ((2530, 2553), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2545, 2553), True, 'import numpy as np\n'), ((2582, 2611), 'numpy.random.randn', 'np.random.randn', (['[*shape][-1]'], {}), '([*shape][-1])\n', (2597, 2611), True, 'import numpy as np\n'), ((2715, 2734), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2726, 2734), True, 'import oneflow.experimental as flow\n'), ((2796, 2815), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2807, 2815), True, 'import oneflow.experimental as flow\n'), ((2872, 2891), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2883, 2891), True, 'import oneflow.experimental as flow\n'), ((2953, 2972), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2964, 2972), True, 'import oneflow.experimental as flow\n'), ((1304, 1324), 'numpy.exp', 'np.exp', (['_neg_max_val'], {}), '(_neg_max_val)\n', (1310, 1324), True, 'import numpy as np\n'), ((1327, 1356), 'numpy.exp', 'np.exp', (['(_neg_input - _max_val)'], {}), '(_neg_input - _max_val)\n', (1333, 1356), True, 'import numpy as np\n'), ((2058, 2075), 'numpy.exp', 'np.exp', (['(-np_input)'], {}), '(-np_input)\n', (2064, 2075), True, 'import numpy as np\n'), ((2083, 2100), 'numpy.exp', 'np.exp', (['(-np_input)'], {}), '(-np_input)\n', (2089, 2100), True, 'import numpy as np\n'), ((1141, 1161), 'numpy.exp', 'np.exp', (['_neg_max_val'], {}), '(_neg_max_val)\n', (1147, 1161), True, 'import numpy as np\n'), ((1164, 1193), 'numpy.exp', 'np.exp', (['(_neg_input - _max_val)'], {}), '(_neg_input - _max_val)\n', (1170, 1193), True, 'import numpy as np\n')]
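# ---------------------------------------------------------------------------
# Side note (not part of the original test): `_np_bcewithlogitsloss` above uses
# the numerically stable form of BCE-with-logits. A quick sanity check that it
# matches the naive sigmoid formulation on well-behaved inputs:
import numpy as np

x = np.random.randn(4, 5)
t = np.random.randint(0, 2, (4, 5)).astype(np.float64)

sig = 1.0 / (1.0 + np.exp(-x))
naive = -(t * np.log(sig) + (1 - t) * np.log(1 - sig))

max_val = np.clip(-x, 0, None)  # max(-x, 0), as in the helper above
stable = (1 - t) * x + max_val + np.log(np.exp(-max_val) + np.exp(-x - max_val))

assert np.allclose(naive, stable)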
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import unittest from datetime import datetime import oneflow as flow import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util import oneflow.core.operator.op_conf_pb2 as op_conf_util import oneflow.unittest _DATA_DIR = "/dataset/PNGS/PNG227/of_record_repeated" _MODEL_SAVE_DIR = "./model_save-{}".format( str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) ) _MODEL_LOAD = "/dataset/PNGS/cnns_model_for_test/alexnet/models/of_model_bk" class DLNetSpec(object): def __init__(self): self.batch_size = 8 self.data_part_num = 32 self.eval_dir = _DATA_DIR self.train_dir = _DATA_DIR self.model_save_dir = _MODEL_SAVE_DIR self.model_load_dir = _MODEL_LOAD self.num_nodes = 1 self.node_list = None self.gpu_num_per_node = 1 self.iter_num = 10 global_specs = DLNetSpec() class TrainData(flow.model.DataModule): def __init__(self, specs): super().__init__() self.specs = specs def forward(self, *args): return _data_load_layer(self.specs, self.specs.train_dir) class ValData(flow.model.DataModule): def __init__(self, specs): super().__init__() self.specs = specs def forward(self, *args): return _data_load_layer(self.specs, self.specs.eval_dir) class AlexNet(flow.model.Model): def __init__(self, specs, *args, **kwargs): super().__init__(*args, **kwargs) self.specs = specs def forward(self, images, trainable=False): conv1 = _conv2d_layer( "conv1", images, filters=64, kernel_size=11, strides=4, padding="VALID" ) pool1 = flow.nn.avg_pool2d(conv1, 3, 2, "VALID", "NCHW", name="pool1") conv2 = _conv2d_layer("conv2", pool1, filters=192, kernel_size=5) pool2 = flow.nn.avg_pool2d(conv2, 3, 2, "VALID", "NCHW", name="pool2") conv3 = _conv2d_layer("conv3", pool2, filters=384) conv4 = _conv2d_layer("conv4", conv3, filters=384) conv5 = _conv2d_layer("conv5", conv4, filters=256) pool5 = flow.nn.avg_pool2d(conv5, 3, 2, "VALID", "NCHW", name="pool5") def _get_initializer(): kernel_initializer = initializer_conf_util.InitializerConf() kernel_initializer.truncated_normal_conf.std = 0.816496580927726 return kernel_initializer if len(pool5.shape) > 2: pool5 = flow.reshape(pool5, shape=(pool5.shape[0], -1)) fc1 = flow.layers.dense( inputs=pool5, units=4096, activation=flow.math.relu, use_bias=False, kernel_initializer=_get_initializer(), bias_initializer=False, trainable=trainable, name="fc1", ) dropout1 = fc1 fc2 = flow.layers.dense( inputs=dropout1, units=4096, activation=flow.math.relu, use_bias=False, kernel_initializer=_get_initializer(), bias_initializer=False, trainable=trainable, name="fc2", ) dropout2 = fc2 fc3 = flow.layers.dense( inputs=dropout2, units=1001, activation=None, use_bias=False, kernel_initializer=_get_initializer(), bias_initializer=False, trainable=trainable, name="fc3", ) return fc3 def training_step(self, batch, optimizer_idx): assert optimizer_idx == 0 (images, labels) = batch fc3 = self(images, True) loss = flow.nn.sparse_softmax_cross_entropy_with_logits( labels, fc3, name="softmax_loss" ) return loss def 
validation_step(self, batch): (images, labels) = batch fc3 = self(images, False) loss = flow.nn.sparse_softmax_cross_entropy_with_logits( labels, fc3, name="softmax_loss" ) return loss def configure_optimizers(self): return flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-05]), momentum=0 ) class LossMoniter(flow.model.Callback): def on_training_step_end(self, step_idx, outputs, optimizer_idx): assert optimizer_idx == 0 loss = outputs.mean() fmt_str = "{:>12} {:>12} {:>12.6f}" print(fmt_str.format(step_idx, "train loss:", loss)) def on_validation_step_end(self, step_idx, outputs): loss = outputs.mean() fmt_str = "{:>12} {:>12} {:>12.6f}" print(fmt_str.format(step_idx, "validation loss:", loss)) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_1n1c(test_case): flow.env.ctrl_port(9788) flow.config.machine_num(global_specs.num_nodes) flow.config.gpu_device_num(global_specs.gpu_num_per_node) train_exe_config = flow.ExecutionConfig() train_exe_config.default_logical_view(flow.scope.consistent_view()) train_exe_config.default_data_type(flow.float) train_config = flow.model.TrainingConfig() train_config.config_execution(train_exe_config) train_config.config_data(TrainData(global_specs)) val_exe_config = flow.ExecutionConfig() val_exe_config.default_logical_view(flow.scope.consistent_view()) val_exe_config.default_data_type(flow.float) val_config = flow.model.ValidationConfig() val_config.config_execution(val_exe_config) val_config.config_data(ValData(global_specs)) val_config.config_step_interval(10) ck_config = flow.model.CheckpointConfig() ck_config.config_load(dirpath=global_specs.model_load_dir) ck_config.config_save(dirpath=global_specs.model_save_dir, step_interval=10) loss_monitor_cb = LossMoniter() alexnet_md = AlexNet(global_specs, is_deprecated_function_style=True) alexnet_md.fit( training_config=train_config, validation_config=val_config, checkpoint_config=ck_config, callbacks=[loss_monitor_cb], max_steps=20, ) def _conv2d_layer( name, input, filters, kernel_size=3, strides=1, padding="SAME", data_format="NCHW", dilation_rate=1, activation=op_conf_util.kRelu, use_bias=False, weight_initializer=flow.random_uniform_initializer(), bias_initializer=flow.random_uniform_initializer(), ): weight_shape = (filters, input.shape[1], kernel_size, kernel_size) weight = flow.get_variable( name + "-weight", shape=weight_shape, dtype=input.dtype, initializer=weight_initializer, ) output = flow.nn.conv2d( input, weight, strides, padding, None, data_format, dilation_rate, name=name ) if use_bias: bias = flow.get_variable( name + "-bias", shape=(filters,), dtype=input.dtype, initializer=bias_initializer, ) output = flow.nn.bias_add(output, bias, data_format) if activation is not None: if activation == op_conf_util.kRelu: output = flow.nn.relu(output) else: raise NotImplementedError return output def _data_load_layer(args, data_dir): node_num = args.num_nodes total_batch_size = args.batch_size * args.gpu_num_per_node * node_num rgb_mean = [123.68, 116.78, 103.94] (image, label) = flow.data.ofrecord_image_classification_reader( data_dir, batch_size=total_batch_size, data_part_num=args.data_part_num, image_feature_name="encoded", label_feature_name="class/label", color_space="RGB", name="decode", ) rsz = flow.image.resize(image, target_size=[227, 227], color_space="RGB") normal = flow.image.crop_mirror_normalize( rsz, color_space="RGB", output_layout="NCHW", mean=rgb_mean, output_dtype=flow.float, ) return (normal, label)
[ "oneflow.nn.relu", "oneflow.image.crop_mirror_normalize", "oneflow.scope.consistent_view", "oneflow.model.TrainingConfig", "oneflow.optimizer.PiecewiseConstantScheduler", "oneflow.model.CheckpointConfig", "oneflow.model.ValidationConfig", "oneflow.get_variable", "oneflow.random_uniform_initializer", "oneflow.nn.bias_add", "oneflow.reshape", "oneflow.env.ctrl_port", "oneflow.image.resize", "oneflow.nn.sparse_softmax_cross_entropy_with_logits", "oneflow.nn.avg_pool2d", "oneflow.ExecutionConfig", "oneflow.config.gpu_device_num", "oneflow.nn.conv2d", "oneflow.config.machine_num", "oneflow.data.ofrecord_image_classification_reader", "oneflow.core.job.initializer_conf_pb2.InitializerConf" ]
[((5314, 5338), 'oneflow.env.ctrl_port', 'flow.env.ctrl_port', (['(9788)'], {}), '(9788)\n', (5332, 5338), True, 'import oneflow as flow\n'), ((5343, 5390), 'oneflow.config.machine_num', 'flow.config.machine_num', (['global_specs.num_nodes'], {}), '(global_specs.num_nodes)\n', (5366, 5390), True, 'import oneflow as flow\n'), ((5395, 5452), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['global_specs.gpu_num_per_node'], {}), '(global_specs.gpu_num_per_node)\n', (5421, 5452), True, 'import oneflow as flow\n'), ((5476, 5498), 'oneflow.ExecutionConfig', 'flow.ExecutionConfig', ([], {}), '()\n', (5496, 5498), True, 'import oneflow as flow\n'), ((5641, 5668), 'oneflow.model.TrainingConfig', 'flow.model.TrainingConfig', ([], {}), '()\n', (5666, 5668), True, 'import oneflow as flow\n'), ((5796, 5818), 'oneflow.ExecutionConfig', 'flow.ExecutionConfig', ([], {}), '()\n', (5816, 5818), True, 'import oneflow as flow\n'), ((5955, 5984), 'oneflow.model.ValidationConfig', 'flow.model.ValidationConfig', ([], {}), '()\n', (5982, 5984), True, 'import oneflow as flow\n'), ((6139, 6168), 'oneflow.model.CheckpointConfig', 'flow.model.CheckpointConfig', ([], {}), '()\n', (6166, 6168), True, 'import oneflow as flow\n'), ((5225, 5259), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (5234, 5259), False, 'import os\n'), ((6853, 6886), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (6884, 6886), True, 'import oneflow as flow\n'), ((6909, 6942), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (6940, 6942), True, 'import oneflow as flow\n'), ((7031, 7141), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer'}), "(name + '-weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer)\n", (7048, 7141), True, 'import oneflow as flow\n'), ((7190, 7286), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'None', 'data_format', 'dilation_rate'], {'name': 'name'}), '(input, weight, strides, padding, None, data_format,\n dilation_rate, name=name)\n', (7204, 7286), True, 'import oneflow as flow\n'), ((7943, 8175), 'oneflow.data.ofrecord_image_classification_reader', 'flow.data.ofrecord_image_classification_reader', (['data_dir'], {'batch_size': 'total_batch_size', 'data_part_num': 'args.data_part_num', 'image_feature_name': '"""encoded"""', 'label_feature_name': '"""class/label"""', 'color_space': '"""RGB"""', 'name': '"""decode"""'}), "(data_dir, batch_size=\n total_batch_size, data_part_num=args.data_part_num, image_feature_name=\n 'encoded', label_feature_name='class/label', color_space='RGB', name=\n 'decode')\n", (7989, 8175), True, 'import oneflow as flow\n'), ((8234, 8301), 'oneflow.image.resize', 'flow.image.resize', (['image'], {'target_size': '[227, 227]', 'color_space': '"""RGB"""'}), "(image, target_size=[227, 227], color_space='RGB')\n", (8251, 8301), True, 'import oneflow as flow\n'), ((8315, 8438), 'oneflow.image.crop_mirror_normalize', 'flow.image.crop_mirror_normalize', (['rsz'], {'color_space': '"""RGB"""', 'output_layout': '"""NCHW"""', 'mean': 'rgb_mean', 'output_dtype': 'flow.float'}), "(rsz, color_space='RGB', output_layout=\n 'NCHW', mean=rgb_mean, output_dtype=flow.float)\n", (8347, 8438), True, 'import oneflow as flow\n'), ((2256, 2318), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['conv1', 
'(3)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool1"""'}), "(conv1, 3, 2, 'VALID', 'NCHW', name='pool1')\n", (2274, 2318), True, 'import oneflow as flow\n'), ((2409, 2471), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['conv2', '(3)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool2"""'}), "(conv2, 3, 2, 'VALID', 'NCHW', name='pool2')\n", (2427, 2471), True, 'import oneflow as flow\n'), ((2665, 2727), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['conv5', '(3)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool5"""'}), "(conv5, 3, 2, 'VALID', 'NCHW', name='pool5')\n", (2683, 2727), True, 'import oneflow as flow\n'), ((4191, 4278), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', (['labels', 'fc3'], {'name': '"""softmax_loss"""'}), "(labels, fc3, name=\n 'softmax_loss')\n", (4239, 4278), True, 'import oneflow as flow\n'), ((4437, 4524), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', (['labels', 'fc3'], {'name': '"""softmax_loss"""'}), "(labels, fc3, name=\n 'softmax_loss')\n", (4485, 4524), True, 'import oneflow as flow\n'), ((5541, 5569), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (5567, 5569), True, 'import oneflow as flow\n'), ((5859, 5887), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (5885, 5887), True, 'import oneflow as flow\n'), ((7329, 7433), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-bias')"], {'shape': '(filters,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer'}), "(name + '-bias', shape=(filters,), dtype=input.dtype,\n initializer=bias_initializer)\n", (7346, 7433), True, 'import oneflow as flow\n'), ((7506, 7549), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (7522, 7549), True, 'import oneflow as flow\n'), ((2794, 2833), 'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (2831, 2833), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((3003, 3050), 'oneflow.reshape', 'flow.reshape', (['pool5'], {'shape': '(pool5.shape[0], -1)'}), '(pool5, shape=(pool5.shape[0], -1))\n', (3015, 3050), True, 'import oneflow as flow\n'), ((4646, 4700), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[1e-05]'], {}), '([], [1e-05])\n', (4687, 4700), True, 'import oneflow as flow\n'), ((7647, 7667), 'oneflow.nn.relu', 'flow.nn.relu', (['output'], {}), '(output)\n', (7659, 7667), True, 'import oneflow as flow\n'), ((929, 943), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (941, 943), False, 'from datetime import datetime\n')]
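# ---------------------------------------------------------------------------
# Hypothetical extra callback (a sketch, not part of the original test) that
# times each training step; it uses the same hook names as LossMoniter above:
import time

import oneflow as flow


class StepTimer(flow.model.Callback):
    def __init__(self):
        self.last = None

    def on_training_step_end(self, step_idx, outputs, optimizer_idx):
        now = time.time()
        if self.last is not None:
            print("step {} took {:.3f}s".format(step_idx, now - self.last))
        self.last = now


# It would be passed alongside the loss monitor, e.g.:
#   alexnet_md.fit(..., callbacks=[loss_monitor_cb, StepTimer()], ...)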
# wujian@2018

import random

import numpy as np

from oneflow.utils.data.dataloader import default_collate
import oneflow.utils.data as dat

from .audio import WaveReader


def make_dataloader(
    train=True, data_kwargs=None, num_workers=4, chunk_size=32000, batch_size=16
):
    dataset = Dataset(**data_kwargs)
    return DataLoader(
        dataset,
        train=train,
        chunk_size=chunk_size,
        batch_size=batch_size,
        num_workers=num_workers,
    )


class Dataset(object):
    """
    Per Utterance Loader
    """

    def __init__(self, mix_scp="", ref_scp=None, sample_rate=8000):
        self.mix = WaveReader(mix_scp, sample_rate=sample_rate)
        self.ref = [WaveReader(ref, sample_rate=sample_rate) for ref in ref_scp]

    def __len__(self):
        return len(self.mix)

    def __getitem__(self, index):
        key = self.mix.index_keys[index]
        mix = self.mix[key]
        ref = [reader[key] for reader in self.ref]
        return {
            "mix": mix.astype(np.float32),
            "ref": [r.astype(np.float32) for r in ref],
        }


class ChunkSplitter(object):
    """
    Split utterance into small chunks
    """

    def __init__(self, chunk_size, train=True, least=16000):
        self.chunk_size = chunk_size
        self.least = least
        self.train = train

    def _make_chunk(self, eg, s):
        """
        Make a chunk instance, which contains:
            "mix": ndarray,
            "ref": [ndarray...]
        """
        chunk = dict()
        chunk["mix"] = eg["mix"][s : s + self.chunk_size]
        chunk["ref"] = [ref[s : s + self.chunk_size] for ref in eg["ref"]]
        return chunk

    def split(self, eg):
        N = eg["mix"].size
        # too short, throw away
        if N < self.least:
            return []
        chunks = []
        # padding zeros
        if N < self.chunk_size:
            P = self.chunk_size - N
            chunk = dict()
            chunk["mix"] = np.pad(eg["mix"], (0, P), "constant")
            chunk["ref"] = [np.pad(ref, (0, P), "constant") for ref in eg["ref"]]
            chunks.append(chunk)
        else:
            # randomly select a start point for training
            s = random.randint(0, N % self.least) if self.train else 0
            while True:
                if s + self.chunk_size > N:
                    break
                chunk = self._make_chunk(eg, s)
                chunks.append(chunk)
                s += self.least
        return chunks


class DataLoader(object):
    """
    Online dataloader for chunk-level PIT
    """

    def __init__(
        self, dataset, num_workers=4, chunk_size=32000, batch_size=16, train=True
    ):
        self.batch_size = batch_size
        self.train = train
        self.splitter = ChunkSplitter(chunk_size, train=train, least=chunk_size // 2)
        # just return a batch of egs, supports multiple workers
        self.eg_loader = dat.DataLoader(
            dataset,
            batch_size=batch_size // 2,
            num_workers=num_workers,
            shuffle=train,
            collate_fn=self._collate,
        )

    def _collate(self, batch):
        """
        Online split of utterances
        """
        chunk = []
        for eg in batch:
            chunk += self.splitter.split(eg)
        return chunk

    def _merge(self, chunk_list):
        """
        Merge chunk list into mini-batches
        """
        N = len(chunk_list)
        if self.train:
            random.shuffle(chunk_list)
        blist = []
        for s in range(0, N - self.batch_size + 1, self.batch_size):
            batch = default_collate(chunk_list[s : s + self.batch_size])
            blist.append(batch)
        rn = N % self.batch_size
        return blist, chunk_list[-rn:] if rn else []

    def __iter__(self):
        chunk_list = []
        for chunks in self.eg_loader:
            chunk_list += chunks
            batch, chunk_list = self._merge(chunk_list)
            for obj in batch:
                yield obj


"""
A mini-batch looks like:
    'mix': batch x L
    'ref': [batch x L, batch x L]
"""
[ "oneflow.utils.data.dataloader.default_collate", "oneflow.utils.data.DataLoader" ]
[((2924, 3045), 'oneflow.utils.data.DataLoader', 'dat.DataLoader', (['dataset'], {'batch_size': '(batch_size // 2)', 'num_workers': 'num_workers', 'shuffle': 'train', 'collate_fn': 'self._collate'}), '(dataset, batch_size=batch_size // 2, num_workers=num_workers,\n shuffle=train, collate_fn=self._collate)\n', (2938, 3045), True, 'import oneflow.utils.data as dat\n'), ((1970, 2007), 'numpy.pad', 'np.pad', (["eg['mix']", '(0, P)', '"""constant"""'], {}), "(eg['mix'], (0, P), 'constant')\n", (1976, 2007), True, 'import numpy as np\n'), ((3474, 3500), 'random.shuffle', 'random.shuffle', (['chunk_list'], {}), '(chunk_list)\n', (3488, 3500), False, 'import random\n'), ((3609, 3659), 'oneflow.utils.data.dataloader.default_collate', 'default_collate', (['chunk_list[s:s + self.batch_size]'], {}), '(chunk_list[s:s + self.batch_size])\n', (3624, 3659), False, 'from oneflow.utils.data.dataloader import default_collate\n'), ((2036, 2067), 'numpy.pad', 'np.pad', (['ref', '(0, P)', '"""constant"""'], {}), "(ref, (0, P), 'constant')\n", (2042, 2067), True, 'import numpy as np\n'), ((2206, 2239), 'random.randint', 'random.randint', (['(0)', '(N % self.least)'], {}), '(0, N % self.least)\n', (2220, 2239), False, 'import random\n')]
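# ---------------------------------------------------------------------------
# Toy illustration of ChunkSplitter.split (sizes shrunk from the real
# 32000/16000 defaults purely for readability; not part of the original file):
import numpy as np

splitter = ChunkSplitter(chunk_size=8, train=False, least=4)
eg = {
    "mix": np.arange(10, dtype=np.float32),
    "ref": [np.arange(10, dtype=np.float32)],
}
chunks = splitter.split(eg)
# In eval mode the start point is 0 and the hop is `least`, so a 10-sample
# utterance yields exactly one 8-sample chunk (starting at 4 would overrun).
assert len(chunks) == 1 and chunks[0]["mix"].shape == (8,)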
import os
import sys
import argparse
import pickle
import struct

import oneflow.core.record.record_pb2 as of_record


def parse_arguement(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_dir',
        type=str,
        default='/home/qiaojing/git_repo_out/insightface/datasets/faces_emore',
        help='Root directory to mxnet dataset.',
    )
    parser.add_argument(
        '--output_filepath',
        type=str,
        default='./output',
        help='Path to output OFRecord.',
    )
    parser.add_argument(
        '--dataset_name',
        type=str,
        default='lfw',
        help='dataset_name.',
    )
    return parser.parse_args(argv)


def load_bin_data(data_dir, dataset_name):
    path = os.path.join(data_dir, dataset_name + ".bin")
    try:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f)  # py2
    except UnicodeDecodeError as e:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f, encoding='bytes')  # py3
    return bins, issame_list


def convert_to_ofrecord(img_data):
    """Convert a Python dictionary with one image's data to an of.OFRecord proto.

    Args:
        img_data: Python dict.
    Returns:
        example: The converted of.OFRecord.
    """

    def _int32_feature(value):
        """Wrapper for inserting int32 features into the OFRecord proto."""
        if not isinstance(value, list):
            value = [value]
        return of_record.Feature(int32_list=of_record.Int32List(value=value))

    def _float_feature(value):
        """Wrapper for inserting float features into the OFRecord proto."""
        if not isinstance(value, list):
            value = [value]
        return of_record.Feature(float_list=of_record.FloatList(value=value))

    def _double_feature(value):
        """Wrapper for inserting double features into the OFRecord proto."""
        if not isinstance(value, list):
            value = [value]
        return of_record.Feature(double_list=of_record.DoubleList(value=value))

    def _bytes_feature(value):
        """Wrapper for inserting bytes features into the OFRecord proto."""
        # if isinstance(value, six.string_types):
        #     value = six.binary_type(value, encoding='utf-8')
        return of_record.Feature(bytes_list=of_record.BytesList(value=[value]))

    example = of_record.OFRecord(feature={
        'issame': _int32_feature(img_data['label']),
        'encoded': _bytes_feature(img_data['pixel_data']),
    })
    return example


def main(args):
    # Convert bin to ofrecord
    bins, issame_list = load_bin_data(
        data_dir=args.data_dir, dataset_name=args.dataset_name)
    output_dir = args.output_filepath
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_file = os.path.join(output_dir, 'part-0')
    with open(output_file, 'wb') as f:
        for idx in range(len(bins)):
            if idx % 1000 == 0:
                print("Converting images: {} of {}".format(idx, len(bins)))
            img_data = {}
            img_data['label'] = int(issame_list[idx // 2])
            if args.dataset_name == "lfw":
                img_data['pixel_data'] = bins[idx]
            elif args.dataset_name == "cfp_fp" or args.dataset_name == "agedb_30":
                img_data['pixel_data'] = bins[idx].tobytes()
            else:
                raise NotImplementedError
            example = convert_to_ofrecord(img_data)
            l = example.ByteSize()
            f.write(struct.pack("q", l))
            f.write(example.SerializeToString())


if __name__ == '__main__':
    main(parse_arguement(sys.argv[1:]))
[ "oneflow.core.record.record_pb2.Int32List", "oneflow.core.record.record_pb2.DoubleList", "oneflow.core.record.record_pb2.BytesList", "oneflow.core.record.record_pb2.FloatList" ]
[((160, 185), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (183, 185), False, 'import argparse\n'), ((708, 753), 'os.path.join', 'os.path.join', (['data_dir', "(dataset_name + '.bin')"], {}), "(data_dir, dataset_name + '.bin')\n", (720, 753), False, 'import os\n'), ((2749, 2783), 'os.path.join', 'os.path.join', (['output_dir', '"""part-0"""'], {}), "(output_dir, 'part-0')\n", (2761, 2783), False, 'import os\n'), ((2671, 2697), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (2685, 2697), False, 'import os\n'), ((2707, 2730), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (2718, 2730), False, 'import os\n'), ((829, 843), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (840, 843), False, 'import pickle\n'), ((955, 987), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (966, 987), False, 'import pickle\n'), ((1460, 1492), 'oneflow.core.record.record_pb2.Int32List', 'of_record.Int32List', ([], {'value': 'value'}), '(value=value)\n', (1479, 1492), True, 'import oneflow.core.record.record_pb2 as of_record\n'), ((1709, 1741), 'oneflow.core.record.record_pb2.FloatList', 'of_record.FloatList', ([], {'value': 'value'}), '(value=value)\n', (1728, 1741), True, 'import oneflow.core.record.record_pb2 as of_record\n'), ((1960, 1993), 'oneflow.core.record.record_pb2.DoubleList', 'of_record.DoubleList', ([], {'value': 'value'}), '(value=value)\n', (1980, 1993), True, 'import oneflow.core.record.record_pb2 as of_record\n'), ((2252, 2286), 'oneflow.core.record.record_pb2.BytesList', 'of_record.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (2271, 2286), True, 'import oneflow.core.record.record_pb2 as of_record\n'), ((3459, 3478), 'struct.pack', 'struct.pack', (['"""q"""', 'l'], {}), "('q', l)\n", (3470, 3478), False, 'import struct\n')]
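# ---------------------------------------------------------------------------
# Sketch of the inverse operation (not part of the original script): each
# record in `part-0` is an int64 byte length followed by a serialized OFRecord
# message, mirroring the struct.pack("q", ...) writer above.
import struct

import oneflow.core.record.record_pb2 as of_record


def read_ofrecords(path):
    with open(path, "rb") as f:
        while True:
            head = f.read(8)
            if len(head) < 8:
                break
            (length,) = struct.unpack("q", head)
            record = of_record.OFRecord()
            record.ParseFromString(f.read(length))
            yield record


for record in read_ofrecords("./output/part-0"):
    print(record.feature["issame"].int32_list.value[0])
    break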
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile OneFlow Models
======================
**Author**: `<NAME> <https://github.com/jkhu29/>`_

This article is an introductory tutorial to deploying OneFlow models with Relay.

To begin, OneFlow should be installed. A quick solution is to install it via pip:

.. code-block:: bash

    python3 -m pip install -f https://release.oneflow.info oneflow==0.6.0+[PLATFORM]

All available [PLATFORM] options are listed at the official site:
https://github.com/Oneflow-Inc/oneflow

Currently, TVM supports OneFlow 0.6.0. Other versions may be unstable.
"""
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata

import os, math
import numpy as np
from PIL import Image

# oneflow imports
import oneflow as flow
import oneflow.nn as nn
from oneflow import Tensor
from typing import Type, Any, Callable, Union, List, Optional

# prepare for psnr and ssim
from skimage.metrics import peak_signal_noise_ratio
from skimage.metrics import structural_similarity

######################################################################
# OneFlow model: SRGAN
# -------------------------------
# See more at https://github.com/Oneflow-Inc/oneflow_convert_tools/blob/tvm_oneflow/oneflow_tvm/


class Generator(nn.Module):
    def __init__(self, scale_factor):
        upsample_block_num = int(math.log(scale_factor, 2))

        super(Generator, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=9, padding=4), nn.PReLU()
        )
        self.block2 = ResidualBlock(64)
        self.block3 = ResidualBlock(64)
        self.block4 = ResidualBlock(64)
        self.block5 = ResidualBlock(64)
        self.block6 = ResidualBlock(64)
        self.block7 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.PReLU()
        )
        block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]
        block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))
        block8.append(nn.Tanh())
        self.block8 = nn.Sequential(*block8)

    def forward(self, x):
        block1 = self.block1(x)
        block2 = self.block2(block1)
        block3 = self.block3(block2)
        block4 = self.block4(block3)
        block5 = self.block5(block4)
        block6 = self.block6(block5)
        block7 = self.block7(block6)
        block8 = self.block8(block1 + block7)

        return (block8 + 1.) / 2


class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(channels)
        self.prelu = nn.PReLU()
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        residual = self.conv1(x)
        residual = self.bn1(residual)
        residual = self.prelu(residual)
        residual = self.conv2(residual)
        residual = self.bn2(residual)

        return x + residual


class UpsampleBLock(nn.Module):
    def __init__(self, in_channels, up_scale):
        super(UpsampleBLock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1
        )
        self.pixel_shuffle = nn.PixelShuffle(up_scale)
        self.prelu = nn.PReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.pixel_shuffle(x)
        x = self.prelu(x)
        return x


######################################################################
# Load a pretrained OneFlow model
# -------------------------------
# We will download and load a pretrained model provided for this example: SRGAN.
model_url = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/train_data_zjlab/SRGAN_netG_epoch_4_99.zip"
model_file = "SRGAN_netG_epoch_4_99.zip"
model_path = download_testdata(model_url, model_file, module="oneflow")
os.system("unzip -q {}".format(model_path))
model_path = "SRGAN_netG_epoch_4_99"

sr_module = Generator(scale_factor=4)
pretrain_models = flow.load(model_path)
sr_module.load_state_dict(pretrain_models)
sr_module.eval()

######################################################################
# Load a test image
# ------------------
def load_image(image_path="", size=(224, 224)):
    img = Image.open(image_path).convert("RGB")
    img = np.ascontiguousarray(img).astype("float32") / 255
    img_flow = flow.Tensor(img).unsqueeze(0).permute(0, 3, 1, 2)
    return img_flow.numpy(), img_flow


img_url = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/train_data_zjlab/monarchx4.png"
hr_url = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/train_data_zjlab/monarch.png"
img_file = "monarchx4.png"
hr_file = "monarch.png"
img_path = download_testdata(img_url, img_file, module="data")
hr_path = download_testdata(hr_url, hr_file, module="data")
img, img_flow = load_image(img_path)

######################################################################
# Compile the model on Relay
# ---------------------------
# Convert the OneFlow graph to a Relay graph.
class Graph(flow.nn.Graph):
    def __init__(self, module):
        super().__init__()
        self.m = module

    def build(self, x):
        out = self.m(x)
        return out


graph = Graph(sr_module)
_ = graph._compile(img_flow)

mod, params = relay.frontend.from_oneflow(graph, model_path)

######################################################################
# Relay Build and Inference
# ---------------------------
# Build the Relay module with the CUDA target and run inference.
target = "cuda"
with tvm.transform.PassContext(opt_level=10):
    intrp = relay.build_module.create_executor("graph", mod, tvm.cuda(0), target)

dtype = "float32"
tvm_output = intrp.evaluate()(tvm.nd.array(img.astype(dtype)), **params).numpy()

######################################################################
# Display results
# ---------------------------------------------
# Show the super-resolution result.
from matplotlib import pyplot as plt

tvm_output = flow.Tensor(tvm_output).squeeze(0).permute(1, 2, 0) * 255
tvm_img = tvm_output.numpy().astype(np.uint8)
plt.imshow(tvm_img)
plt.show()

######################################################################
# Compare the results
# ---------------------------
# Compare the evaluation metrics (PSNR and SSIM) of the OneFlow output and
# the converted Relay output.
with flow.no_grad():
    out = sr_module(img_flow)

for mode in ["oneflow", "tvm"]:
    if mode == "oneflow":
        out_a = out[0].data.to("cpu") * 255
        out_b = out_a.squeeze(0).permute(1, 2, 0)
        _img = out_b.numpy().astype(np.uint8)
    elif mode == "tvm":
        _img = tvm_img
    if hr_path != "":
        image_hr = np.array(Image.open(hr_path))
        psnr = peak_signal_noise_ratio(image_hr, _img)
        ssim = structural_similarity(image_hr, _img, multichannel=True)
        print("{}: psnr:{}, ssim:{}\n".format(mode, psnr, ssim))
[ "oneflow.no_grad", "oneflow.nn.Tanh", "oneflow.nn.Conv2d", "oneflow.nn.PixelShuffle", "oneflow.nn.BatchNorm2d", "oneflow.load", "oneflow.Tensor", "oneflow.nn.Sequential", "oneflow.nn.PReLU" ]
[((4644, 4702), 'tvm.contrib.download.download_testdata', 'download_testdata', (['model_url', 'model_file'], {'module': '"""oneflow"""'}), "(model_url, model_file, module='oneflow')\n", (4661, 4702), False, 'from tvm.contrib.download import download_testdata\n'), ((4842, 4863), 'oneflow.load', 'flow.load', (['model_path'], {}), '(model_path)\n', (4851, 4863), True, 'import oneflow as flow\n'), ((5544, 5595), 'tvm.contrib.download.download_testdata', 'download_testdata', (['img_url', 'img_file'], {'module': '"""data"""'}), "(img_url, img_file, module='data')\n", (5561, 5595), False, 'from tvm.contrib.download import download_testdata\n'), ((5606, 5655), 'tvm.contrib.download.download_testdata', 'download_testdata', (['hr_url', 'hr_file'], {'module': '"""data"""'}), "(hr_url, hr_file, module='data')\n", (5623, 5655), False, 'from tvm.contrib.download import download_testdata\n'), ((6113, 6159), 'tvm.relay.frontend.from_oneflow', 'relay.frontend.from_oneflow', (['graph', 'model_path'], {}), '(graph, model_path)\n', (6140, 6159), False, 'from tvm import relay\n'), ((6887, 6906), 'matplotlib.pyplot.imshow', 'plt.imshow', (['tvm_img'], {}), '(tvm_img)\n', (6897, 6906), True, 'from matplotlib import pyplot as plt\n'), ((6907, 6917), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6915, 6917), True, 'from matplotlib import pyplot as plt\n'), ((6351, 6390), 'tvm.transform.PassContext', 'tvm.transform.PassContext', ([], {'opt_level': '(10)'}), '(opt_level=10)\n', (6376, 6390), False, 'import tvm\n'), ((7123, 7137), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (7135, 7137), True, 'import oneflow as flow\n'), ((2788, 2810), 'oneflow.nn.Sequential', 'nn.Sequential', (['*block8'], {}), '(*block8)\n', (2801, 2810), True, 'import oneflow.nn as nn\n'), ((3307, 3362), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(channels, channels, kernel_size=3, padding=1)\n', (3316, 3362), True, 'import oneflow.nn as nn\n'), ((3382, 3406), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (3396, 3406), True, 'import oneflow.nn as nn\n'), ((3428, 3438), 'oneflow.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (3436, 3438), True, 'import oneflow.nn as nn\n'), ((3460, 3515), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(channels, channels, kernel_size=3, padding=1)\n', (3469, 3515), True, 'import oneflow.nn as nn\n'), ((3535, 3559), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (3549, 3559), True, 'import oneflow.nn as nn\n'), ((3952, 4029), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(in_channels * up_scale ** 2)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)\n', (3961, 4029), True, 'import oneflow.nn as nn\n'), ((4081, 4106), 'oneflow.nn.PixelShuffle', 'nn.PixelShuffle', (['up_scale'], {}), '(up_scale)\n', (4096, 4106), True, 'import oneflow.nn as nn\n'), ((4128, 4138), 'oneflow.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (4136, 4138), True, 'import oneflow.nn as nn\n'), ((6453, 6464), 'tvm.cuda', 'tvm.cuda', (['(0)'], {}), '(0)\n', (6461, 6464), False, 'import tvm\n'), ((7501, 7540), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['image_hr', '_img'], {}), '(image_hr, _img)\n', (7524, 7540), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((7556, 7612), 'skimage.metrics.structural_similarity', 'structural_similarity', 
(['image_hr', '_img'], {'multichannel': '(True)'}), '(image_hr, _img, multichannel=True)\n', (7577, 7612), False, 'from skimage.metrics import structural_similarity\n'), ((2093, 2118), 'math.log', 'math.log', (['scale_factor', '(2)'], {}), '(scale_factor, 2)\n', (2101, 2118), False, 'import os, math\n'), ((2212, 2254), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(9)', 'padding': '(4)'}), '(3, 64, kernel_size=9, padding=4)\n', (2221, 2254), True, 'import oneflow.nn as nn\n'), ((2256, 2266), 'oneflow.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (2264, 2266), True, 'import oneflow.nn as nn\n'), ((2526, 2569), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 64, kernel_size=3, padding=1)\n', (2535, 2569), True, 'import oneflow.nn as nn\n'), ((2571, 2581), 'oneflow.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (2579, 2581), True, 'import oneflow.nn as nn\n'), ((2689, 2731), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)'], {'kernel_size': '(9)', 'padding': '(4)'}), '(64, 3, kernel_size=9, padding=4)\n', (2698, 2731), True, 'import oneflow.nn as nn\n'), ((2755, 2764), 'oneflow.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2762, 2764), True, 'import oneflow.nn as nn\n'), ((5095, 5117), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (5105, 5117), False, 'from PIL import Image\n'), ((7465, 7484), 'PIL.Image.open', 'Image.open', (['hr_path'], {}), '(hr_path)\n', (7475, 7484), False, 'from PIL import Image\n'), ((5143, 5168), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (5163, 5168), True, 'import numpy as np\n'), ((5208, 5224), 'oneflow.Tensor', 'flow.Tensor', (['img'], {}), '(img)\n', (5219, 5224), True, 'import oneflow as flow\n'), ((6783, 6806), 'oneflow.Tensor', 'flow.Tensor', (['tvm_output'], {}), '(tvm_output)\n', (6794, 6806), True, 'import oneflow as flow\n')]
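# ---------------------------------------------------------------------------
# Optional rough latency comparison between the two backends built above
# (an illustrative sketch only; rigorous benchmarking would use TVM's
# time_evaluator and explicit device synchronization):
import time

start = time.time()
_ = intrp.evaluate()(tvm.nd.array(img.astype(dtype)), **params).numpy()
print("tvm    : {:.3f}s".format(time.time() - start))

start = time.time()
with flow.no_grad():
    _ = sr_module(img_flow).numpy()
print("oneflow: {:.3f}s".format(time.time() - start))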
""" Modified from https://github.com/facebookresearch/deit/blob/main/resmlp_models.py """ import oneflow as flow import oneflow.nn as nn import oneflow.nn.init as init from flowvision.layers import DropPath, PatchEmbed, Mlp from .utils import load_state_dict_from_url from .registry import ModelCreator model_urls = { "resmlp_12_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_12.zip", "resmlp_12_distilled_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_12_dist.zip", "resmlp_12_224_dino": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_12_224_dino.zip", "resmlp_24_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_24.zip", "resmlp_24_distilled_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_24_dist.zip", "resmlp_24_224_dino": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_24_224_dino.zip", "resmlp_36_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_36.zip", "resmlp_36_distilled_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlp_36_dist.zip", "resmlp_big_24_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlpB_24.zip", "resmlp_big_24_224_in22k_to_1k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlpB_24_in22k.zip", "resmlp_big_24_distilled_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResMLP/resmlpB_24_dist.zip", } class Affine(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(flow.ones(dim)) self.beta = nn.Parameter(flow.zeros(dim)) def forward(self, x): return self.alpha * x + self.beta class layers_scale_mlp_blocks(nn.Module): def __init__( self, dim, drop=0.0, drop_path=0.0, act_layer=nn.GELU, init_values=1e-4, num_patches=196, ): super().__init__() self.norm1 = Affine(dim) self.attn = nn.Linear(num_patches, num_patches) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = Affine(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(4.0 * dim), act_layer=act_layer, drop=drop, ) self.gamma_1 = nn.Parameter(init_values * flow.ones((dim)), requires_grad=True) self.gamma_2 = nn.Parameter(init_values * flow.ones((dim)), requires_grad=True) def forward(self, x): x = x + self.drop_path( self.gamma_1 * self.attn(self.norm1(x).transpose(1, 2)).transpose(1, 2) ) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class ResMLP(nn.Module): def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, drop_rate=0.0, Patch_layer=PatchEmbed, act_layer=nn.GELU, drop_path_rate=0.0, init_scale=1e-4, ): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim self.patch_embed = Patch_layer( img_size=img_size, patch_size=patch_size, in_chans=int(in_chans), embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches dpr = [drop_path_rate for i in range(depth)] self.blocks = nn.ModuleList( [ layers_scale_mlp_blocks( dim=embed_dim, drop=drop_rate, drop_path=dpr[i], act_layer=act_layer, init_values=init_scale, 
num_patches=num_patches, ) for i in range(depth) ] ) self.norm = Affine(embed_dim) self.feature_info = [dict(num_chs=embed_dim, reduction=0, module="head")] self.head = ( nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() ) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): init.trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=""): self.num_classes = num_classes self.head = ( nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() ) def forward_features(self, x): B = x.shape[0] x = self.patch_embed(x) for i, blk in enumerate(self.blocks): x = blk(x) x = self.norm(x) x = x.mean(dim=1).reshape(B, 1, -1) # (B, N, C) -> (B, 1, C) return x[:, 0] def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def _create_resmlp(arch, pretrained=False, progress=True, **model_kwargs): model = ResMLP(**model_kwargs) if pretrained: state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) model.load_state_dict(state_dict) return model @ModelCreator.register_model def resmlp_12_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-12 model. .. note:: ResMLP-12 model from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_12_224 = flowvision.models.resmlp_12_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=12, Patch_layer=PatchEmbed, init_scale=0.1, **kwargs, ) return _create_resmlp( "resmlp_12_224", pretrained=pretrained, progress=progress, **model_kwargs ) @ModelCreator.register_model def resmlp_12_distilled_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-12 model with distillation. .. note:: ResMLP-12 model with distillation from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Note that this model is the same as resmlp_12 but the pretrained weight is different. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_12_distilled_224 = flowvision.models.resmlp_12_distilled_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=12, Patch_layer=PatchEmbed, init_scale=0.1, **kwargs, ) return _create_resmlp( "resmlp_12_distilled_224", pretrained=pretrained, progress=progress, **model_kwargs, ) @ModelCreator.register_model def resmlp_12_224_dino(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-12 model trained under DINO proposed in `"Emerging Properties in Self-Supervised Vision Transformers" <https://arxiv.org/abs/2104.14294>`_. .. 
note:: ResMLP-12 model with distillation from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Note that this model is the same as resmlp_12 but the pretrained weight is different. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_12_224_dino = flowvision.models.resmlp_12_224_dino(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=12, Patch_layer=PatchEmbed, init_scale=0.1, **kwargs, ) return _create_resmlp( "resmlp_12_224_dino", pretrained=pretrained, progress=progress, **model_kwargs ) @ModelCreator.register_model def resmlp_24_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-24 model. .. note:: ResMLP-24 model from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_24_224 = flowvision.models.resmlp_24_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=24, Patch_layer=PatchEmbed, init_scale=1e-5, **kwargs, ) return _create_resmlp( "resmlp_24_224", pretrained=pretrained, progress=progress, **model_kwargs ) @ModelCreator.register_model def resmlp_24_distilled_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-24 model with distillation. .. note:: ResMLP-24 model with distillation from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Note that this model is the same as resmlp_24 but the pretrained weight is different. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_24_distilled_224 = flowvision.models.resmlp_24_distilled_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=24, Patch_layer=PatchEmbed, init_scale=1e-5, **kwargs, ) return _create_resmlp( "resmlp_24_distilled_224", pretrained=pretrained, progress=progress, **model_kwargs, ) @ModelCreator.register_model def resmlp_24_224_dino(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-24 model trained under DINO proposed in `"Emerging Properties in Self-Supervised Vision Transformers" <https://arxiv.org/abs/2104.14294>`_. .. note:: ResMLP-24 model with distillation from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Note that this model is the same as resmlp_24 but the pretrained weight is different. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. 
code-block:: python >>> import flowvision >>> resmlp_24_224_dino = flowvision.models.resmlp_24_224_dino(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=24, Patch_layer=PatchEmbed, init_scale=1e-5, **kwargs, ) return _create_resmlp( "resmlp_24_224_dino", pretrained=pretrained, progress=progress, **model_kwargs ) @ModelCreator.register_model def resmlp_36_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-36 model. .. note:: ResMLP-36 model from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_36_224 = flowvision.models.resmlp_36_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=36, Patch_layer=PatchEmbed, init_scale=1e-6, **kwargs, ) return _create_resmlp( "resmlp_36_224", pretrained=pretrained, progress=progress, **model_kwargs ) @ModelCreator.register_model def resmlp_36_distilled_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-36 model with distillation. .. note:: ResMLP-36 model with distillation from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Note that this model is the same as resmlp_36 but the pretrained weight is different. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_36_distilled_224 = flowvision.models.resmlp_36_distilled_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=16, embed_dim=384, depth=36, Patch_layer=PatchEmbed, init_scale=1e-6, **kwargs, ) return _create_resmlp( "resmlp_36_distilled_224", pretrained=pretrained, progress=progress, **model_kwargs, ) @ModelCreator.register_model def resmlp_big_24_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-Big-24 model. .. note:: ResMLP-Big-24 model from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_big_24_224 = flowvision.models.resmlp_big_24_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=8, embed_dim=768, depth=24, Patch_layer=PatchEmbed, init_scale=1e-6, **kwargs, ) return _create_resmlp( "resmlp_big_24_224", pretrained=pretrained, progress=progress, **model_kwargs ) @ModelCreator.register_model def resmlp_big_24_224_in22k_to_1k(pretrained=False, progress=True, **kwargs): """ Constructs the ImageNet22k pretrained ResMLP-B-24 model. .. note:: ImageNet22k pretrained ResMLP-B-24 model from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Note that this model is the same as resmlpB_24 but the pretrained weight is different. 
Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_big_24_224_in22k_to_1k = flowvision.models.resmlp_big_24_224_in22k_to_1k(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=8, embed_dim=768, depth=24, Patch_layer=PatchEmbed, init_scale=1e-6, **kwargs, ) return _create_resmlp( "resmlp_big_24_224_in22k_to_1k", pretrained=pretrained, progress=progress, **model_kwargs, ) @ModelCreator.register_model def resmlp_big_24_distilled_224(pretrained=False, progress=True, **kwargs): """ Constructs the ResMLP-B-24 model with distillation. .. note:: ResMLP-B-24 model with distillation from `"ResMLP: Feedforward networks for image classification with data-efficient training" <https://arxiv.org/pdf/2105.03404.pdf>`_. Note that this model is the same as resmlpB_24 but the pretrained weight is different. Args: pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False`` progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True`` For example: .. code-block:: python >>> import flowvision >>> resmlp_big_24_distilled_224 = flowvision.models.resmlp_big_24_distilled_224(pretrained=False, progress=True) """ model_kwargs = dict( patch_size=8, embed_dim=768, depth=24, Patch_layer=PatchEmbed, init_scale=1e-6, **kwargs, ) return _create_resmlp( "resmlp_big_24_distilled_224", pretrained=pretrained, progress=progress, **model_kwargs, )
[ "oneflow.nn.init.constant_", "oneflow.nn.Linear", "oneflow.nn.init.trunc_normal_", "oneflow.nn.Identity", "oneflow.zeros", "oneflow.ones" ]
[((2443, 2478), 'oneflow.nn.Linear', 'nn.Linear', (['num_patches', 'num_patches'], {}), '(num_patches, num_patches)\n', (2452, 2478), True, 'import oneflow.nn as nn\n'), ((2013, 2027), 'oneflow.ones', 'flow.ones', (['dim'], {}), '(dim)\n', (2022, 2027), True, 'import oneflow as flow\n'), ((2062, 2077), 'oneflow.zeros', 'flow.zeros', (['dim'], {}), '(dim)\n', (2072, 2077), True, 'import oneflow as flow\n'), ((2504, 2523), 'flowvision.layers.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (2512, 2523), False, 'from flowvision.layers import DropPath, PatchEmbed, Mlp\n'), ((2548, 2561), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (2559, 2561), True, 'import oneflow.nn as nn\n'), ((4488, 4521), 'oneflow.nn.Linear', 'nn.Linear', (['embed_dim', 'num_classes'], {}), '(embed_dim, num_classes)\n', (4497, 4521), True, 'import oneflow.nn as nn\n'), ((4546, 4559), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (4557, 4559), True, 'import oneflow.nn as nn\n'), ((4691, 4729), 'oneflow.nn.init.trunc_normal_', 'init.trunc_normal_', (['m.weight'], {'std': '(0.02)'}), '(m.weight, std=0.02)\n', (4709, 4729), True, 'import oneflow.nn.init as init\n'), ((5158, 5196), 'oneflow.nn.Linear', 'nn.Linear', (['self.embed_dim', 'num_classes'], {}), '(self.embed_dim, num_classes)\n', (5167, 5196), True, 'import oneflow.nn as nn\n'), ((5221, 5234), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (5232, 5234), True, 'import oneflow.nn as nn\n'), ((2808, 2822), 'oneflow.ones', 'flow.ones', (['dim'], {}), '(dim)\n', (2817, 2822), True, 'import oneflow as flow\n'), ((2896, 2910), 'oneflow.ones', 'flow.ones', (['dim'], {}), '(dim)\n', (2905, 2910), True, 'import oneflow as flow\n'), ((4810, 4838), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4827, 4838), True, 'import oneflow.nn as nn\n'), ((4893, 4921), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4910, 4921), True, 'import oneflow.nn as nn\n'), ((4934, 4966), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1.0)'], {}), '(m.weight, 1.0)\n', (4951, 4966), True, 'import oneflow.nn as nn\n')]
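The sample above defines the full ResMLP family. As a quick sanity check of the `ResMLP` class itself, the following sketch (not part of the dataset row; the `resmlp_12_224` hyper-parameters and a batch of 2 are assumptions) builds the model and verifies the output shape:

import numpy as np
import oneflow as flow

# instantiate the ResMLP class defined above with resmlp_12_224's settings
model = ResMLP(img_size=224, patch_size=16, embed_dim=384, depth=12, init_scale=0.1)
x = flow.Tensor(np.random.rand(2, 3, 224, 224).astype(np.float32))
logits = model(x)
assert tuple(logits.shape) == (2, 1000)  # default num_classes=1000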
import oneflow as flow import cv2 import numpy as np def conv2d_layer( input, num_filters, kernel_size=3, strides=1, padding="SAME", data_format="NCHW", dilation_rate=1, use_bias=True, weight_initializer=flow.xavier_uniform_initializer(), bias_initializer=flow.zeros_initializer(), name="conv2d", trainable=None ): weight_shape = (num_filters, input.shape[1], kernel_size, kernel_size) with flow.scope.namespace(name): weight = flow.get_variable( "weight", shape=weight_shape, dtype=input.dtype, initializer=weight_initializer, trainable=trainable ) output = flow.nn.conv2d( input, weight, strides, padding, data_format, dilation_rate) if use_bias: with flow.scope.namespace(name): bias = flow.get_variable( "bias", shape=(num_filters,), dtype=input.dtype, initializer=bias_initializer, trainable=trainable ) output = flow.nn.bias_add(output, bias, data_format) return output def deconv(input, out_channel, name_prefix, kernel_size = 4, strides = [2, 2], trainable = True, reuse = True): weight = flow.get_variable( name_prefix + "_weight", shape = (input.shape[1], out_channel, kernel_size, kernel_size), dtype = flow.float, initializer = flow.xavier_uniform_initializer(), trainable = trainable, reuse = reuse ) return flow.nn.conv2d_transpose( input, weight, strides = strides, padding = "SAME", output_shape = (input.shape[0], out_channel, input.shape[2] * strides[0], input.shape[3] * strides[1])) # input produces the parameter-free normalized activation; segmap produces the scale and bias def spade(input, segmap, ks=3, pf_norm='batch', trainable=True, name_prefix='spade'): """ @param norm_nc: the #channels of the normalized activations, hence the output dim of SPADE @param segmap: @param input: @param ks: the size of kernel in the SPADE module (e.g. 3x3) @param pf_norm: the type of parameter-free normalization. (e.g. syncbatch, batch, instance) """ # norm_nc is taken from the channel dimension of input norm_nc = input.shape[1] with flow.scope.namespace(name_prefix): def mlp_shared(segmap_interpolate, nhidden, ks, trainable=trainable): out = conv2d_layer(segmap_interpolate, nhidden, kernel_size=ks, trainable=trainable, padding='SAME', name=name_prefix+'mlp_shared') out = flow.nn.relu(out) return out if pf_norm == 'batch': param_free_norm = flow.layers.batch_normalization(input, axis=1, trainable=trainable, name='pf_norm', center=False, scale=False) else: raise NotImplementedError('Other normalization methods are not implemented!') actv = mlp_shared(segmap, 128, ks, trainable=trainable) gamma = conv2d_layer(actv, norm_nc, ks, trainable=trainable, padding='SAME', name='gamma') beta = conv2d_layer(actv, norm_nc, ks, trainable=trainable, padding='SAME', name='beta') out = param_free_norm * (1+gamma) + beta return out def spadeRes(input, segmap, out_c, spectral=True, trainable=True, name_prefix='spadeRes'): learned_shortcut = (input.shape[1] != out_c) middle_c = min(input.shape[1], out_c) if spectral==True: raise NotImplementedError('Spectral norm is not implemented') def shortcut(x, seg, trainable=True): if learned_shortcut: if spectral==True: raise NotImplementedError('Spectral norm is not implemented') else: with flow.scope.namespace(name_prefix): x_s = conv2d_layer(spade(x, seg), out_c, kernel_size=1, trainable=trainable, use_bias=False, name='shortcut') # the paper places a relu between spade and conv2d, but it is absent in the NVlabs code. 
else: x_s = x return x_s def actvn(x): return flow.nn.leaky_relu(x, alpha=2e-1) with flow.scope.namespace(name_prefix): x_s = shortcut(input, segmap, trainable=trainable) x = spade(input, segmap, trainable=trainable, name_prefix='spadeRes_spade1') x = actvn(x) x = conv2d_layer(x, middle_c, trainable=trainable, name='spadeRes_conv2d_1') x = spade(x, segmap, trainable=trainable, name_prefix='spadeRes_spade2') x = actvn(x) x = conv2d_layer(x, out_c, trainable=trainable, name='spadeRes_conv2d_2') # the residual connection applies in both cases; x_s is the identity when the shortcut is not learned return x + x_s
[ "oneflow.nn.relu", "oneflow.xavier_uniform_initializer", "oneflow.nn.leaky_relu", "oneflow.zeros_initializer", "oneflow.nn.bias_add", "oneflow.layers.batch_normalization", "oneflow.nn.conv2d_transpose", "oneflow.nn.conv2d", "oneflow.scope.namespace", "oneflow.get_variable" ]
[((217, 250), 'oneflow.xavier_uniform_initializer', 'flow.xavier_uniform_initializer', ([], {}), '()\n', (248, 250), True, 'import oneflow as flow\n'), ((273, 297), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (295, 297), True, 'import oneflow as flow\n'), ((671, 746), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'data_format', 'dilation_rate'], {}), '(input, weight, strides, padding, data_format, dilation_rate)\n', (685, 746), True, 'import oneflow as flow\n'), ((1533, 1715), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['input', 'weight'], {'strides': 'strides', 'padding': '"""SAME"""', 'output_shape': '(input.shape[0], out_channel, input.shape[2] * strides[0], input.shape[3] *\n strides[1])'}), "(input, weight, strides=strides, padding='SAME',\n output_shape=(input.shape[0], out_channel, input.shape[2] * strides[0],\n input.shape[3] * strides[1]))\n", (1557, 1715), True, 'import oneflow as flow\n'), ((422, 448), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (442, 448), True, 'import oneflow as flow\n'), ((467, 590), 'oneflow.get_variable', 'flow.get_variable', (['"""weight"""'], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer', 'trainable': 'trainable'}), "('weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer, trainable=trainable)\n", (484, 590), True, 'import oneflow as flow\n'), ((1064, 1107), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (1080, 1107), True, 'import oneflow as flow\n'), ((2387, 2420), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name_prefix'], {}), '(name_prefix)\n', (2407, 2420), True, 'import oneflow as flow\n'), ((4081, 4113), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['x'], {'alpha': '(0.2)'}), '(x, alpha=0.2)\n', (4099, 4113), True, 'import oneflow as flow\n'), ((4124, 4157), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name_prefix'], {}), '(name_prefix)\n', (4144, 4157), True, 'import oneflow as flow\n'), ((787, 813), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (807, 813), True, 'import oneflow as flow\n'), ((834, 955), 'oneflow.get_variable', 'flow.get_variable', (['"""bias"""'], {'shape': '(num_filters,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer', 'trainable': 'trainable'}), "('bias', shape=(num_filters,), dtype=input.dtype,\n initializer=bias_initializer, trainable=trainable)\n", (851, 955), True, 'import oneflow as flow\n'), ((1428, 1461), 'oneflow.xavier_uniform_initializer', 'flow.xavier_uniform_initializer', ([], {}), '()\n', (1459, 1461), True, 'import oneflow as flow\n'), ((2662, 2679), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (2674, 2679), True, 'import oneflow as flow\n'), ((2765, 2880), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', (['input'], {'axis': '(1)', 'trainable': 'trainable', 'name': '"""pf_norm"""', 'center': '(False)', 'scale': '(False)'}), "(input, axis=1, trainable=trainable, name=\n 'pf_norm', center=False, scale=False)\n", (2796, 2880), True, 'import oneflow as flow\n'), ((3737, 3770), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name_prefix'], {}), '(name_prefix)\n', (3757, 3770), True, 'import oneflow as flow\n')]
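These builders target OneFlow's lazy graph mode, so they are meant to be called from inside a job function. A minimal sketch of the wiring (the `flow.global_function`/`oneflow.typing` API of the same OneFlow generation is assumed, and the shapes and the 64-to-128 channel mapping are illustrative):

import numpy as np
import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def spade_res_job(
    feat: tp.Numpy.Placeholder((1, 64, 32, 32)),
    seg: tp.Numpy.Placeholder((1, 3, 32, 32)),
) -> tp.Numpy:
    # spectral norm is not implemented, so it must be disabled explicitly
    return spadeRes(feat, seg, out_c=128, spectral=False)

out = spade_res_job(
    np.random.rand(1, 64, 32, 32).astype(np.float32),
    np.random.rand(1, 3, 32, 32).astype(np.float32),
)  # -> shape (1, 128, 32, 32)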
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np from automated_test_util import * from test_util import GenArgList import oneflow as flow import oneflow.unittest def _test_flip(test_case, device): np_arr = np.arange(0, 16).reshape((2, 2, 2, 2)).astype(np.float32) input = flow.Tensor(np_arr, device=flow.device(device), requires_grad=True) out = flow.flip(input, [0, 1, 2]) np_out = [ [[[14.0, 15.0], [12.0, 13.0]], [[10.0, 11.0], [8.0, 9.0]]], [[[6.0, 7.0], [4.0, 5.0]], [[2.0, 3.0], [0.0, 1.0]]], ] test_case.assertTrue(np.allclose(out.numpy(), np_out, 1e-05, 1e-05)) out = out.sum() out = out.backward() np_grad = np.ones_like(np_arr) test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-05, 1e-05)) def _test_flip_input_int(test_case, device): np_arr = np.arange(0, 10).reshape((2, 5)).astype(np.float32) input = flow.Tensor(np_arr, device=flow.device(device), requires_grad=True) out = flow.flip(input, 1) np_out = [[4.0, 3.0, 2.0, 1.0, 0.0], [9.0, 8.0, 7.0, 6.0, 5.0]] test_case.assertTrue(np.allclose(out.numpy(), np_out, 1e-05, 1e-05)) out = out.sum() out = out.backward() np_grad = np.ones_like(np_arr) test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-05, 1e-05)) def _test_flip_input_tuple_int(test_case, device): np_arr = np.arange(0, 10).reshape((2, 5)).astype(np.float32) input = flow.Tensor(np_arr, device=flow.device(device), requires_grad=True) out = flow.flip(input, (1)) np_out = [[4.0, 3.0, 2.0, 1.0, 0.0], [9.0, 8.0, 7.0, 6.0, 5.0]] test_case.assertTrue(np.allclose(out.numpy(), np_out, 1e-05, 1e-05)) out = out.sum() out = out.backward() np_grad = np.ones_like(np_arr) test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-05, 1e-05)) class TestFlip(flow.unittest.TestCase): def test_flip(test_case): arg_dict = OrderedDict() arg_dict["test_fun"] = [ _test_flip, _test_flip_input_int, _test_flip_input_tuple_int, ] arg_dict["device"] = ["cpu", "cuda"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) if __name__ == "__main__": unittest.main()
[ "oneflow.flip", "oneflow.device" ]
[((976, 1003), 'oneflow.flip', 'flow.flip', (['input', '[0, 1, 2]'], {}), '(input, [0, 1, 2])\n', (985, 1003), True, 'import oneflow as flow\n'), ((1287, 1307), 'numpy.ones_like', 'np.ones_like', (['np_arr'], {}), '(np_arr)\n', (1299, 1307), True, 'import numpy as np\n'), ((1591, 1610), 'oneflow.flip', 'flow.flip', (['input', '(1)'], {}), '(input, 1)\n', (1600, 1610), True, 'import oneflow as flow\n'), ((1811, 1831), 'numpy.ones_like', 'np.ones_like', (['np_arr'], {}), '(np_arr)\n', (1823, 1831), True, 'import numpy as np\n'), ((2121, 2140), 'oneflow.flip', 'flow.flip', (['input', '(1)'], {}), '(input, 1)\n', (2130, 2140), True, 'import oneflow as flow\n'), ((2343, 2363), 'numpy.ones_like', 'np.ones_like', (['np_arr'], {}), '(np_arr)\n', (2355, 2363), True, 'import numpy as np\n'), ((2850, 2865), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2863, 2865), False, 'import unittest\n'), ((2536, 2549), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2547, 2549), False, 'from collections import OrderedDict\n'), ((2755, 2775), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2765, 2775), False, 'from test_util import GenArgList\n'), ((925, 944), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (936, 944), True, 'import oneflow as flow\n'), ((1540, 1559), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1551, 1559), True, 'import oneflow as flow\n'), ((2070, 2089), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2081, 2089), True, 'import oneflow as flow\n'), ((828, 844), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {}), '(0, 16)\n', (837, 844), True, 'import numpy as np\n'), ((1449, 1465), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (1458, 1465), True, 'import numpy as np\n'), ((1979, 1995), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (1988, 1995), True, 'import numpy as np\n')]
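`flow.flip` reverses the tensor along the given axes and matches NumPy's semantics; a quick equivalence sketch (eager mode assumed):

import numpy as np
import oneflow as flow

arr = np.arange(6).reshape(2, 3).astype(np.float32)
x = flow.Tensor(arr)
# flipping along dim 1 reverses each row, exactly like np.flip
assert np.allclose(flow.flip(x, [1]).numpy(), np.flip(arr, axis=1))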
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ """ Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import numpy as np import oneflow.experimental as flow @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestDropout(flow.unittest.TestCase): def test_dropout(test_case): input_arr = np.array( [ [-0.7797, 0.2264, 0.2458, 0.4163], [0.4299, 0.3626, -0.4892, 0.4141], [-1.4115, 1.2183, -0.5503, 0.6520], ] ) m = flow.nn.Dropout(p=0) x = flow.Tensor(input_arr) y = m(x) test_case.assertTrue(np.allclose(y.numpy(), input_arr)) def test_dropout_special_case(test_case): input_arr = np.array( [ [-0.7797, 0.2264, 0.2458, 0.4163], [0.4299, 0.3626, -0.4892, 0.4141], [-1.4115, 1.2183, -0.5503, 0.6520], ] ) output = np.array( [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],] ) m = flow.nn.Dropout(p=1.0) x = flow.Tensor(input_arr) y = m(x) test_case.assertTrue(np.allclose(y.numpy(), output)) if __name__ == "__main__": unittest.main()
[ "oneflow.experimental.nn.Dropout", "oneflow.experimental.unittest.env.eager_execution_enabled", "oneflow.experimental.Tensor" ]
[((2380, 2395), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2393, 2395), False, 'import unittest\n'), ((1462, 1582), 'numpy.array', 'np.array', (['[[-0.7797, 0.2264, 0.2458, 0.4163], [0.4299, 0.3626, -0.4892, 0.4141], [-\n 1.4115, 1.2183, -0.5503, 0.652]]'], {}), '([[-0.7797, 0.2264, 0.2458, 0.4163], [0.4299, 0.3626, -0.4892, \n 0.4141], [-1.4115, 1.2183, -0.5503, 0.652]])\n', (1470, 1582), True, 'import numpy as np\n'), ((1676, 1696), 'oneflow.experimental.nn.Dropout', 'flow.nn.Dropout', ([], {'p': '(0)'}), '(p=0)\n', (1691, 1696), True, 'import oneflow.experimental as flow\n'), ((1709, 1731), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (1720, 1731), True, 'import oneflow.experimental as flow\n'), ((1880, 2000), 'numpy.array', 'np.array', (['[[-0.7797, 0.2264, 0.2458, 0.4163], [0.4299, 0.3626, -0.4892, 0.4141], [-\n 1.4115, 1.2183, -0.5503, 0.652]]'], {}), '([[-0.7797, 0.2264, 0.2458, 0.4163], [0.4299, 0.3626, -0.4892, \n 0.4141], [-1.4115, 1.2183, -0.5503, 0.652]])\n', (1888, 2000), True, 'import numpy as np\n'), ((2099, 2175), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]])\n', (2107, 2175), True, 'import numpy as np\n'), ((2211, 2233), 'oneflow.experimental.nn.Dropout', 'flow.nn.Dropout', ([], {'p': '(1.0)'}), '(p=1.0)\n', (2226, 2233), True, 'import oneflow.experimental as flow\n'), ((2246, 2268), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (2257, 2268), True, 'import oneflow.experimental as flow\n'), ((1277, 1320), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (1318, 1320), True, 'import oneflow.experimental as flow\n')]
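The two tests above pin down the p=0 (identity) and p=1 (all zeros) edge cases. For 0 < p < 1 in training mode, inverted dropout rescales the surviving entries by 1/(1-p); an illustrative sketch (p=0.5 is an assumption, not taken from the tests):

import numpy as np
import oneflow.experimental as flow

m = flow.nn.Dropout(p=0.5)  # the module defaults to training mode
x = flow.Tensor(np.ones((4, 4), dtype=np.float32))
y = m(x).numpy()
# each entry is either 0.0 (dropped) or 2.0 (kept, rescaled by 1/(1-0.5))
assert set(np.unique(y)) <= {0.0, 2.0}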
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from oneflow.compatible.single_client.framework import session_context as session_ctx def GetDefaultBackwardBlobRegister(): return session_ctx.GetDefaultSession().backward_blob_register def ReleaseUnusedBlobObject(op_attribute, blob_register): assert op_attribute.HasField("blob_last_used_signature"), op_attribute signature_map = op_attribute.blob_last_used_signature.bn_in_op2blob_last_used bn_in_op2lbi = op_attribute.arg_signature.bn_in_op2lbi for (bn_in_op, is_blob_last_used) in signature_map.items(): if not is_blob_last_used: continue lbi = bn_in_op2lbi[bn_in_op] lbn = "%s/%s" % (lbi.op_name, lbi.blob_name) blob_register.ClearObject4BlobName(lbn) def TrySetBackwardUsedBlobObject(op_attribute, fw_blob_register, bw_blob_register): assert op_attribute.HasField("blob_backward_used_signature"), op_attribute signature_map = ( op_attribute.blob_backward_used_signature.bn_in_op2blob_backward_used ) bn_in_op2lbi = op_attribute.arg_signature.bn_in_op2lbi for (bn_in_op, is_blob_backward_used) in signature_map.items(): if not is_blob_backward_used: continue lbi = bn_in_op2lbi[bn_in_op] lbn = "%s/%s" % (lbi.op_name, lbi.blob_name) blob_object = fw_blob_register.GetObject4BlobName(lbn) bw_blob_register.TrySetObject4BlobName(lbn, blob_object)
[ "oneflow.compatible.single_client.framework.session_context.GetDefaultSession" ]
[((727, 758), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (756, 758), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow from test_util import GenArgList import numpy as np import unittest from collections import OrderedDict import random def _test_stack(test_case, device, shape): x = np.random.rand(*shape) y = np.random.rand(*shape) x_tensor = flow.Tensor(x, dtype=flow.float32, device=flow.device(device)) y_tensor = flow.Tensor(y, dtype=flow.float32, device=flow.device(device)) out_np = np.stack([x, y], axis=1) out_of = flow.experimental.stack([x_tensor, y_tensor], dim=1).numpy() test_case.assertTrue(np.allclose(out_np, out_of, 1e-5, 1e-5)) def _test_stack_tuple_input(test_case, device, shape): x = np.random.rand(*shape) y = np.random.rand(*shape) x_tensor = flow.Tensor(x, dtype=flow.float32, device=flow.device(device)) y_tensor = flow.Tensor(y, dtype=flow.float32, device=flow.device(device)) out_np = np.stack([x, y], axis=0) out_of = flow.experimental.stack((x_tensor, y_tensor), dim=0).numpy() test_case.assertTrue(np.allclose(out_np, out_of, 1e-5, 1e-5)) def _test_stack_backward(test_case, device, shape): x = np.random.rand(*shape) y = np.random.rand(*shape) x_tensor = flow.Tensor(x, device=flow.device(device), requires_grad=True) y_tensor = flow.Tensor(y, device=flow.device(device), requires_grad=True) out_of = flow.experimental.stack([x_tensor, y_tensor]).sum() out_of.backward() test_case.assertTrue( np.allclose(x_tensor.grad.numpy(), np.ones(x_tensor.shape), 1e-5, 1e-5) ) test_case.assertTrue( np.allclose(y_tensor.grad.numpy(), np.ones(y_tensor.shape), 1e-5, 1e-5) ) def _test_stack_different_dim(test_case, device, shape): x = np.random.rand(*shape) y = np.random.rand(*shape) x_tensor = flow.Tensor(x, device=flow.device(device)) y_tensor = flow.Tensor(y, device=flow.device(device)) for axis in range(-len(x.shape) - 1, len(x.shape) + 1): out_of = flow.experimental.stack([x_tensor, y_tensor], dim=axis) out_np = np.stack([x, y], axis=axis) test_case.assertTrue(np.allclose(out_np, out_of.numpy(), 1e-05, 1e-05)) def _test_stack_multi_input(test_case, device, shape): # Test 2 to 9 inputs max_input_num = 10 for i in range(2, max_input_num): x = [] x_tensor = [] for _ in range(0, i): tmp = np.random.rand(*shape) x.append(tmp) x_tensor.append(flow.Tensor(tmp, device=flow.device(device))) out_of = flow.experimental.stack(x_tensor, dim=-1) out_np = np.stack(x, axis=-1) test_case.assertTrue(np.allclose(out_np, out_of.numpy(), 1e-05, 1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestStack(flow.unittest.TestCase): def test_stack(test_case): arg_dict = OrderedDict() arg_dict["test_fun"] = [ _test_stack, _test_stack_tuple_input, _test_stack_backward, _test_stack_different_dim, _test_stack_multi_input, ] arg_dict["device"] = ["cpu", "cuda"] # Generate random tuple from 3D to 5D with values ranging from 1 to 9 arg_dict["shape"] = [ tuple(random.randrange(1, 10) for _ in range(i)) for i in range(3, 6) ] for arg in GenArgList(arg_dict): arg[0](test_case, 
*arg[1:]) if __name__ == "__main__": unittest.main()
[ "oneflow.unittest.env.eager_execution_enabled", "oneflow.device", "oneflow.experimental.stack" ]
[((784, 806), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (798, 806), True, 'import numpy as np\n'), ((815, 837), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (829, 837), True, 'import numpy as np\n'), ((1007, 1031), 'numpy.stack', 'np.stack', (['[x, y]'], {'axis': '(1)'}), '([x, y], axis=1)\n', (1015, 1031), True, 'import numpy as np\n'), ((1237, 1259), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1251, 1259), True, 'import numpy as np\n'), ((1268, 1290), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1282, 1290), True, 'import numpy as np\n'), ((1460, 1484), 'numpy.stack', 'np.stack', (['[x, y]'], {'axis': '(0)'}), '([x, y], axis=0)\n', (1468, 1484), True, 'import numpy as np\n'), ((1687, 1709), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1701, 1709), True, 'import numpy as np\n'), ((1718, 1740), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1732, 1740), True, 'import numpy as np\n'), ((2275, 2297), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (2289, 2297), True, 'import numpy as np\n'), ((2306, 2328), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (2320, 2328), True, 'import numpy as np\n'), ((4027, 4042), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4040, 4042), False, 'import unittest\n'), ((1131, 1172), 'numpy.allclose', 'np.allclose', (['out_np', 'out_of', '(1e-05)', '(1e-05)'], {}), '(out_np, out_of, 1e-05, 1e-05)\n', (1142, 1172), True, 'import numpy as np\n'), ((1584, 1625), 'numpy.allclose', 'np.allclose', (['out_np', 'out_of', '(1e-05)', '(1e-05)'], {}), '(out_np, out_of, 1e-05, 1e-05)\n', (1595, 1625), True, 'import numpy as np\n'), ((2522, 2577), 'oneflow.experimental.stack', 'flow.experimental.stack', (['[x_tensor, y_tensor]'], {'dim': 'axis'}), '([x_tensor, y_tensor], dim=axis)\n', (2545, 2577), True, 'import oneflow as flow\n'), ((2595, 2622), 'numpy.stack', 'np.stack', (['[x, y]'], {'axis': 'axis'}), '([x, y], axis=axis)\n', (2603, 2622), True, 'import numpy as np\n'), ((3071, 3112), 'oneflow.experimental.stack', 'flow.experimental.stack', (['x_tensor'], {'dim': '(-1)'}), '(x_tensor, dim=-1)\n', (3094, 3112), True, 'import oneflow as flow\n'), ((3130, 3150), 'numpy.stack', 'np.stack', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (3138, 3150), True, 'import numpy as np\n'), ((3439, 3452), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3450, 3452), False, 'from collections import OrderedDict\n'), ((3932, 3952), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3942, 3952), False, 'from test_util import GenArgList\n'), ((3259, 3302), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3300, 3302), True, 'import oneflow as flow\n'), ((895, 914), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (906, 914), True, 'import oneflow as flow\n'), ((973, 992), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (984, 992), True, 'import oneflow as flow\n'), ((1045, 1097), 'oneflow.experimental.stack', 'flow.experimental.stack', (['[x_tensor, y_tensor]'], {'dim': '(1)'}), '([x_tensor, y_tensor], dim=1)\n', (1068, 1097), True, 'import oneflow as flow\n'), ((1348, 1367), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1359, 1367), True, 'import oneflow as flow\n'), ((1426, 1445), 'oneflow.device', 'flow.device', (['device'], {}), 
'(device)\n', (1437, 1445), True, 'import oneflow as flow\n'), ((1498, 1550), 'oneflow.experimental.stack', 'flow.experimental.stack', (['(x_tensor, y_tensor)'], {'dim': '(0)'}), '((x_tensor, y_tensor), dim=0)\n', (1521, 1550), True, 'import oneflow as flow\n'), ((1778, 1797), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1789, 1797), True, 'import oneflow as flow\n'), ((1856, 1875), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1867, 1875), True, 'import oneflow as flow\n'), ((1910, 1955), 'oneflow.experimental.stack', 'flow.experimental.stack', (['[x_tensor, y_tensor]'], {}), '([x_tensor, y_tensor])\n', (1933, 1955), True, 'import oneflow as flow\n'), ((2053, 2076), 'numpy.ones', 'np.ones', (['x_tensor.shape'], {}), '(x_tensor.shape)\n', (2060, 2076), True, 'import numpy as np\n'), ((2165, 2188), 'numpy.ones', 'np.ones', (['y_tensor.shape'], {}), '(y_tensor.shape)\n', (2172, 2188), True, 'import numpy as np\n'), ((2366, 2385), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2377, 2385), True, 'import oneflow as flow\n'), ((2424, 2443), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2435, 2443), True, 'import oneflow as flow\n'), ((2931, 2953), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (2945, 2953), True, 'import numpy as np\n'), ((3839, 3862), 'random.randrange', 'random.randrange', (['(1)', '(10)'], {}), '(1, 10)\n', (3855, 3862), False, 'import random\n'), ((3032, 3051), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (3043, 3051), True, 'import oneflow as flow\n')]
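`flow.experimental.stack` follows `np.stack`: every input must share one shape and a new axis is inserted at `dim`; a small sketch (the shapes are illustrative):

import numpy as np
import oneflow as flow

a = flow.Tensor(np.zeros((2, 3), dtype=np.float32))
b = flow.Tensor(np.ones((2, 3), dtype=np.float32))
out = flow.experimental.stack([a, b], dim=0)  # new leading axis of size 2
assert tuple(out.shape) == (2, 2, 3)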
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oneflow as flow from oneflow import nn from libai.utils import distributed as dist class Linear1D(nn.Module): r"""Linear layer with 1D parallelism which includes column parallelism and row parallelism. The linear layer is defined as :math:`Y = XA + b`. In column parallelism, A is parallelized along the second dimension as :math:`A = [A_1, ..., A_p]`. In row parallelism, A is parallelized along the first dimension and X along its second dimension as: .. math:: A = \begin{bmatrix} A\_1 \\ . \\ . \\ . \\ A\_p \end{bmatrix} x = \begin{bmatrix} x\_1 & ... & x\_p \end{bmatrix} Arguments: in_features: size of each input sample. out_features: size of each output sample. bias: If set to ``False``, the layer will not learn an additive bias. Defaults to ``True``. parallel: Parallel mode. Defaults to "data". init_method: method to initialize weight. Defaults to :func:`nn.init.xavier_normal_`. skip_bias_add: skip adding bias but instead return it, so that adding bias can be fused with other elementwise operations. Defaults to ``False``. layer_idx: A layer_idx sign which determines the placement. It will be used in pipeline parallelism. Defaults to 0. """ def __init__( self, in_features, out_features, bias=True, parallel="data", init_method=nn.init.xavier_normal_, skip_bias_add=False, *, layer_idx=0, # enforce layer_idx passed with keyword ): super().__init__() self.in_features = in_features self.out_features = out_features self.parallel = parallel self.skip_bias_add = skip_bias_add if parallel == "col": # Column parallel weight sbp: [B, S(1)] and bias sbp: [B, S(0)]. weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(1)]) bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]) elif parallel == "row": # Row parallel weight sbp: [B, S(0)] and bias sbp: [B, B] weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]) bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]) elif parallel == "data": weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]) bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]) else: raise KeyError(f"{parallel} is not supported! Only support ('data', 'row' and 'col')") self.weight = flow.nn.Parameter( flow.empty( (in_features, out_features), dtype=flow.float32, placement=dist.get_layer_placement(layer_idx), # for pipeline parallelism placement sbp=weight_sbp, ) ) init_method(self.weight) self.bias = ( flow.nn.Parameter( flow.zeros( (out_features,), dtype=flow.float32, placement=dist.get_layer_placement(layer_idx), sbp=bias_sbp, ) ) if bias else None ) def forward(self, x): if dist.same_sbp(self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(1)])): # if the last dim of weight sbp sign is S(1), the last dim of x sbp sign must be B. 
if self.weight.sbp[-1] == flow.sbp.split(1): x_sbp = x.sbp[:-1] + (flow.sbp.broadcast,) x = x.to_global(sbp=x_sbp) # x.grad sbp must be x.sbp, otherwise backward pass cannot be performed correctly. x = x.to_global(grad_sbp=x.sbp) x = flow.matmul(x, self.weight) elif dist.same_sbp( self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]) ): # if the last dim of weight sbp sign is S(0), the last dim of x sbp # sign must be S(ndim-1). if self.weight.sbp[-1] == flow.sbp.split(0): x_sbp = x.sbp[:-1] + (flow.sbp.split(x.ndim - 1),) x = x.to_global(sbp=x_sbp) out_sbp = x.sbp[:-1] + (flow.sbp.broadcast,) else: out_sbp = x.sbp x = flow.matmul(x, self.weight) # Change x.sbp for followup forward pass. # This line can be removed when sbp can be auto inferred. x = x.to_global(sbp=out_sbp) elif dist.same_sbp( self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]) ): # x.grad sbp must be x.sbp, otherwise backward pass cannot be performed correctly. x = x.to_global(grad_sbp=x.sbp) # Change x.sbp to [S(0), S(0)] if weight is [B, B] x = x.to_global(sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.split(0)])) x = flow.matmul(x, self.weight) else: raise NotImplementedError(f"Not support weight with sbp: {self.weight.sbp}") if self.bias is not None: if self.skip_bias_add: return x, self.bias else: return x + self.bias else: return x def extra_repr(self) -> str: return "in_features={}, out_features={}, bias={}, parallel={}".format( self.in_features, self.out_features, self.bias is not None, self.parallel, ) # Give an alias for Linear1d Linear = Linear1D
[ "oneflow.matmul", "oneflow.sbp.split" ]
[((4567, 4594), 'oneflow.matmul', 'flow.matmul', (['x', 'self.weight'], {}), '(x, self.weight)\n', (4578, 4594), True, 'import oneflow as flow\n'), ((2979, 3036), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (2994, 3036), True, 'from libai.utils import distributed as dist\n'), ((4290, 4307), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (4304, 4307), True, 'import oneflow as flow\n'), ((5134, 5161), 'oneflow.matmul', 'flow.matmul', (['x', 'self.weight'], {}), '(x, self.weight)\n', (5145, 5161), True, 'import oneflow as flow\n'), ((2672, 2689), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (2686, 2689), True, 'import oneflow as flow\n'), ((2752, 2769), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (2766, 2769), True, 'import oneflow as flow\n'), ((3095, 3152), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (3110, 3152), True, 'from libai.utils import distributed as dist\n'), ((3176, 3233), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (3191, 3233), True, 'from libai.utils import distributed as dist\n'), ((3520, 3555), 'libai.utils.distributed.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (3544, 3555), True, 'from libai.utils import distributed as dist\n'), ((4134, 4151), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (4148, 4151), True, 'import oneflow as flow\n'), ((4877, 4894), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (4891, 4894), True, 'import oneflow as flow\n'), ((5384, 5441), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (5399, 5441), True, 'from libai.utils import distributed as dist\n'), ((5760, 5787), 'oneflow.matmul', 'flow.matmul', (['x', 'self.weight'], {}), '(x, self.weight)\n', (5771, 5787), True, 'import oneflow as flow\n'), ((2936, 2953), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (2950, 2953), True, 'import oneflow as flow\n'), ((3873, 3908), 'libai.utils.distributed.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (3897, 3908), True, 'from libai.utils import distributed as dist\n'), ((4690, 4707), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (4704, 4707), True, 'import oneflow as flow\n'), ((4934, 4960), 'oneflow.sbp.split', 'flow.sbp.split', (['(x.ndim - 1)'], {}), '(x.ndim - 1)\n', (4948, 4960), True, 'import oneflow as flow\n'), ((5704, 5721), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5718, 5721), True, 'import oneflow as flow\n'), ((5723, 5740), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5737, 5740), True, 'import oneflow as flow\n')]
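The docstring math above can be checked numerically without any distributed setup. The NumPy sketch below stands in for the SBP-based sharding (the 2-way split and the matrix sizes are assumptions): column parallelism concatenates partial outputs, row parallelism sums partial products, and both recover Y = XA + b:

import numpy as np

X = np.random.randn(4, 6)
A = np.random.randn(6, 8)
b = np.random.randn(8)
# column parallel: A = [A1, A2] split on its second dim; concat partial outputs
A1, A2 = np.split(A, 2, axis=1)
Y_col = np.concatenate([X @ A1, X @ A2], axis=1) + b
# row parallel: A split on its first dim, X on its second; sum partial products
A1r, A2r = np.split(A, 2, axis=0)
X1, X2 = np.split(X, 2, axis=1)
Y_row = X1 @ A1r + X2 @ A2r + b
assert np.allclose(Y_col, X @ A + b) and np.allclose(Y_row, X @ A + b)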
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import random as rd import unittest from collections import OrderedDict import numpy as np from automated_test_util import * from test_util import GenArgList import oneflow as flow import oneflow.unittest def _numpy_fmod(x, y): sign = np.sign(x) res = np.fmod(np.abs(x), np.abs(y)) return sign * res def _numpy_fmod_grad(x): grad = np.ones_like(x) return grad def _test_fmod_same_shape_tensor(test_case, shape, device): input = flow.Tensor( np.random.uniform(-100, 100, shape), dtype=flow.float32, device=flow.device(device), requires_grad=True, ) other = flow.Tensor( np.random.uniform(-10, 10, shape), dtype=flow.float32, device=flow.device(device), ) of_out = flow.fmod(input, other) np_out = _numpy_fmod(input.numpy(), other.numpy()) of_out.sum().backward() test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05)) test_case.assertTrue( np.allclose(input.grad.numpy(), _numpy_fmod_grad(input.numpy()), 1e-05, 1e-05) ) def _test_fmod_tensor_vs_scalar(test_case, shape, device): input = flow.Tensor( np.random.randint(-100, 100, shape), dtype=flow.float32, device=flow.device(device), requires_grad=True, ) other = rd.uniform(-1, 1) * 100 of_out = flow.fmod(input, other) np_out = _numpy_fmod(input.numpy(), other) of_out.sum().backward() test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05)) test_case.assertTrue( np.allclose(input.grad.numpy(), _numpy_fmod_grad(input.numpy()), 1e-05, 1e-05) ) class TestFmodModule(flow.unittest.TestCase): def test_fmod(test_case): arg_dict = OrderedDict() arg_dict["fun"] = [_test_fmod_same_shape_tensor, _test_fmod_tensor_vs_scalar] arg_dict["shape"] = [(2,), (2, 3), (2, 4, 5, 6)] arg_dict["device"] = ["cpu", "cuda"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) @autotest def test_flow_fmod_with_random_data(test_case): device = random_device() input = random_pytorch_tensor().to(device) other = random_pytorch_tensor().to(device) return torch.fmod(input, other) if __name__ == "__main__": unittest.main()
[ "oneflow.device", "oneflow.fmod" ]
[((834, 844), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (841, 844), True, 'import numpy as np\n'), ((945, 960), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (957, 960), True, 'import numpy as np\n'), ((1358, 1381), 'oneflow.fmod', 'flow.fmod', (['input', 'other'], {}), '(input, other)\n', (1367, 1381), True, 'import oneflow as flow\n'), ((1938, 1961), 'oneflow.fmod', 'flow.fmod', (['input', 'other'], {}), '(input, other)\n', (1947, 1961), True, 'import oneflow as flow\n'), ((2887, 2902), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2900, 2902), False, 'import unittest\n'), ((863, 872), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (869, 872), True, 'import numpy as np\n'), ((874, 883), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (880, 883), True, 'import numpy as np\n'), ((1072, 1107), 'numpy.random.uniform', 'np.random.uniform', (['(-100)', '(100)', 'shape'], {}), '(-100, 100, shape)\n', (1089, 1107), True, 'import numpy as np\n'), ((1240, 1273), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', 'shape'], {}), '(-10, 10, shape)\n', (1257, 1273), True, 'import numpy as np\n'), ((1754, 1789), 'numpy.random.randint', 'np.random.randint', (['(-100)', '(100)', 'shape'], {}), '(-100, 100, shape)\n', (1771, 1789), True, 'import numpy as np\n'), ((1901, 1918), 'random.uniform', 'rd.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1911, 1918), True, 'import random as rd\n'), ((2329, 2342), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2340, 2342), False, 'from collections import OrderedDict\n'), ((2550, 2570), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2560, 2570), False, 'from test_util import GenArgList\n'), ((1152, 1171), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1163, 1171), True, 'import oneflow as flow\n'), ((1318, 1337), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1329, 1337), True, 'import oneflow as flow\n'), ((1834, 1853), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1845, 1853), True, 'import oneflow as flow\n')]
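`flow.fmod` keeps the sign of the dividend, exactly what the `_numpy_fmod` reference above computes (np.sign(x) * np.fmod(|x|, |y|)); a quick sketch:

import numpy as np
import oneflow as flow

x = flow.Tensor(np.array([-5.0, 5.0], dtype=np.float32))
y = flow.Tensor(np.array([3.0, -3.0], dtype=np.float32))
# fmod(-5, 3) -> -2 and fmod(5, -3) -> 2: the result follows x's sign
assert np.allclose(flow.fmod(x, y).numpy(), [-2.0, 2.0])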
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np import oneflow.experimental as flow from test_util import GenArgList @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestReLUModule(flow.unittest.TestCase): def test_relu(test_case): m = flow.nn.ReLU() arr = np.random.randn(2, 3, 4, 5) np_out = np.maximum(0, arr) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestReLU6Module(flow.unittest.TestCase): def test_relu6(test_case): m = flow.nn.ReLU6() arr = np.random.randn(2, 3, 4, 5) np_out = np.minimum(np.maximum(0, arr), 6.0) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestTanhModule(flow.unittest.TestCase): def _test_body_tanh(test_case, input_arr): x = flow.Tensor(input_arr) tanh = flow.nn.Tanh() y = tanh(x) z = np.tanh(input_arr) test_case.assertTrue(np.allclose(y.numpy(), z, rtol=1e-4, atol=1e-4)) def _test_ones_body_tanh(self, shape): x = np.ones(shape, dtype=np.float32) self._test_body_tanh(x) def _test_random_body_tanh(self, shape): x = np.random.random(shape).astype(np.float32) self._test_body_tanh(x) def test_ones_input_tanh(self): self._test_ones_body_tanh((1)) self._test_ones_body_tanh((1, 10)) self._test_ones_body_tanh((2, 10, 2)) self._test_ones_body_tanh((2, 5, 2, 2)) def test_random_input_tanh(self): self._test_random_body_tanh((1)) self._test_random_body_tanh((1, 10)) self._test_random_body_tanh((2, 10, 2)) self._test_random_body_tanh((2, 5, 2, 2)) def _test_body_tanh_v2(test_case, input_arr): x = flow.Tensor(input_arr) y = flow.tanh(x) z = np.tanh(input_arr) test_case.assertTrue(np.allclose(y.numpy(), z, rtol=1e-4, atol=1e-4)) def _test_body_tanh_v3(test_case, input_arr): x = flow.Tensor(input_arr) y = x.tanh() z = np.tanh(input_arr) test_case.assertTrue(np.allclose(y.numpy(), z, rtol=1e-4, atol=1e-4)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestELUModule(flow.unittest.TestCase): def test_elu(test_case): m = flow.nn.ELU() arr = np.random.randn(2, 3, 4, 5) np_out = np.where(arr > 0, arr, 1.0 * (np.exp(arr) - 1)) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-4, atol=1e-4)) def test_elu_alpha(test_case): m = flow.nn.ELU(alpha=1.2) arr = np.random.randn(2, 3, 4, 5) np_out = np.where(arr > 0, arr, 1.2 * (np.exp(arr) - 1)) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-4, atol=1e-4)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class 
TestGeLU(flow.unittest.TestCase): def test_gelu_v1(test_case): input_arr = np.array([-0.5, 0, 0.5]).astype(np.float32) x = flow.Tensor(input_arr) gelu = flow.nn.GELU() y = gelu(x) z = np.array([-0.15426877, 0.0, 0.34573123]) test_case.assertTrue(np.allclose(y.numpy(), z, rtol=1e-4, atol=1e-4)) def test_gelu_v2(test_case): input_arr = np.array([-0.5, 0, 0.5]).astype(np.float32) x = flow.Tensor(input_arr) y = flow.gelu(x) z = np.array([-0.15426877, 0.0, 0.34573123]) test_case.assertTrue(np.allclose(y.numpy(), z, rtol=1e-4, atol=1e-4)) def test_gelu_v3(test_case): input_arr = np.array([-0.5, 0, 0.5]).astype(np.float32) x = flow.Tensor(input_arr) y = x.gelu() z = np.array([-0.15426877, 0.0, 0.34573123]) test_case.assertTrue(np.allclose(y.numpy(), z, rtol=1e-4, atol=1e-4)) def numpy_sigmoid(x): return 1.0 / (1 + np.exp(-x)) def numpy_sigmoid_grad(inputs, grads): x = np.exp(-inputs) delta = x / (1 + x) ** 2 return delta * grads def numpy_softmax(x, axis): x = x - x.max(axis=axis, keepdims=True) y = np.exp(x) return y / y.sum(axis=axis, keepdims=True) def numpy_logsoftmax(x, dim): e_x = np.exp(x - np.max(x, axis=dim, keepdims=True)) return np.log(e_x / e_x.sum(axis=dim, keepdims=True)) def _test_sigmoid(test_case, device): m = flow.nn.Sigmoid() input_arr = np.random.randn(2, 3, 4, 5) x = flow.Tensor(input_arr, device=flow.device(device)) y = m(x) y2 = flow.sigmoid(x) y3 = x.sigmoid() output = numpy_sigmoid(input_arr) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) test_case.assertTrue(np.allclose(y2.numpy(), output, rtol=1e-05)) test_case.assertTrue(np.allclose(y3.numpy(), output, rtol=1e-05)) def _test_sigmoid_backward(test_case, device): input_arr = np.random.randn(2, 3, 4, 5) x = flow.Tensor(input_arr, device=flow.device(device), requires_grad=True) x_grad = numpy_sigmoid_grad(input_arr, np.ones(input_arr.shape)) m = flow.nn.Sigmoid() y = m(x).sum() y.backward() test_case.assertTrue(np.allclose(x.grad.numpy(), x_grad, rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestSigmoid(flow.unittest.TestCase): def test_sigmoid(test_case): arg_dict = OrderedDict() arg_dict["fun"] = [ _test_sigmoid, _test_sigmoid_backward, ] arg_dict["device"] = ["cpu", "cuda"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) def _test_softmax(test_case, device): axis = 0 m = flow.nn.Softmax(dim=axis) arr = np.random.randn(2, 3, 4, 5) x = flow.Tensor(arr, device=flow.device(device)) y = m(x) output = numpy_softmax(arr, axis) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) def _test_softmax_dim_1(test_case, device): axis = 1 m = flow.nn.Softmax(dim=axis) arr = np.random.randn(9, 7, 8, 16) x = flow.Tensor(arr, device=flow.device(device)) y = m(x) output = numpy_softmax(arr, axis) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) def _test_softmax_dim_2(test_case, device): axis = 2 m = flow.nn.Softmax(dim=axis) arr = np.random.randn(2, 5, 6, 3) x = flow.Tensor(arr, device=flow.device(device)) y = m(x) output = numpy_softmax(arr, axis) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) def _test_softmax_dim_3(test_case, device): axis = 3 m = flow.nn.Softmax(dim=axis) arr = np.random.randn(1, 3, 4, 7) x = flow.Tensor(arr, device=flow.device(device)) y = m(x) output = numpy_softmax(arr, axis) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) axis2 = -1 m2 = flow.nn.Softmax(dim=axis2) y2 = m2(x) output2 = numpy_softmax(arr, axis2) 
test_case.assertTrue(np.allclose(y2.numpy(), output2, rtol=1e-05)) softmax_input_arr = np.array( [ [ [ [2.0, 1.0, 9.0, 3.0, 4.0], [1.0, 6.0, 7.0, 1.0, 4.0], [4.0, 7.0, 5.0, 8.0, 1.0], [9.0, 5.0, 7.0, 8.0, 5.0], ], [ [1.0, 1.0, 5.0, 3.0, 5.0], [3.0, 6.0, 3.0, 7.0, 8.0], [8.0, 8.0, 1.0, 2.0, 6.0], [3.0, 5.0, 6.0, 1.0, 1.0], ], [ [8.0, 3.0, 6.0, 3.0, 7.0], [8.0, 5.0, 1.0, 2.0, 7.0], [3.0, 9.0, 4.0, 6.0, 5.0], [5.0, 1.0, 2.0, 3.0, 6.0], ], ], [ [ [3.0, 5.0, 3.0, 1.0, 7.0], [5.0, 2.0, 6.0, 3.0, 5.0], [5.0, 1.0, 8.0, 6.0, 9.0], [9.0, 8.0, 4.0, 5.0, 1.0], ], [ [7.0, 5.0, 7.0, 1.0, 6.0], [3.0, 3.0, 6.0, 6.0, 7.0], [9.0, 4.0, 1.0, 5.0, 7.0], [7.0, 6.0, 9.0, 8.0, 6.0], ], [ [6.0, 7.0, 5.0, 3.0, 9.0], [4.0, 1.0, 2.0, 3.0, 2.0], [4.0, 3.0, 8.0, 7.0, 8.0], [1.0, 3.0, 8.0, 6.0, 2.0], ], ], ] ) def _test_softmax_backward(test_case, device): x_grad = np.array( [ [ [ [ 0.00000000e00, 0.00000000e00, -2.21495572e-16, 9.77881196e-17, -1.05306593e-17, ], [ 0.00000000e00, 0.00000000e00, 0.00000000e00, 1.32341829e-17, 0.00000000e00, ], [ 0.00000000e00, -2.21495572e-16, -1.05306593e-17, 9.77881196e-17, 0.00000000e00, ], [ 0.00000000e00, -1.05306593e-17, -2.11513946e-16, -2.11513946e-16, 0.00000000e00, ], ], [ [ -5.49032632e-19, 0.00000000e00, 1.32341829e-17, 9.77881196e-17, 0.00000000e00, ], [ 0.00000000e00, -2.11513946e-16, -1.05306593e-17, 0.00000000e00, 0.00000000e00, ], [ 0.00000000e00, 0.00000000e00, 0.00000000e00, -1.05306593e-17, 0.00000000e00, ], [ 0.00000000e00, 0.00000000e00, -1.05306593e-17, 0.00000000e00, -1.48611144e-18, ], ], [ [ 9.77881196e-17, 0.00000000e00, 0.00000000e00, 0.00000000e00, 1.32341829e-17, ], [ 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, -2.20558493e-16, ], [ 0.00000000e00, -2.21495572e-16, 0.00000000e00, 0.00000000e00, -1.05306593e-17, ], [ 0.00000000e00, 1.32341829e-17, -5.49032632e-19, -1.05306593e-17, 0.00000000e00, ], ], ], [ [ [ 0.00000000e00, 0.00000000e00, -5.49032632e-19, 1.32341829e-17, -2.11513946e-16, ], [ 0.00000000e00, 0.00000000e00, 0.00000000e00, 9.77881196e-17, 0.00000000e00, ], [ 0.00000000e00, -5.49032632e-19, -2.11513946e-16, 1.32341829e-17, 0.00000000e00, ], [ 0.00000000e00, -2.11513946e-16, -1.05306593e-17, -1.05306593e-17, 0.00000000e00, ], ], [ [ -2.21495572e-16, 0.00000000e00, 9.77881196e-17, 1.32341829e-17, 0.00000000e00, ], [ 0.00000000e00, -1.05306593e-17, -2.11513946e-16, 0.00000000e00, 0.00000000e00, ], [ 0.00000000e00, 0.00000000e00, 0.00000000e00, -2.11513946e-16, 0.00000000e00, ], [ 0.00000000e00, 0.00000000e00, -2.11513946e-16, 0.00000000e00, -2.20558493e-16, ], ], [ [ 1.32341829e-17, 0.00000000e00, 0.00000000e00, 0.00000000e00, 9.77881196e-17, ], [ 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, -1.48611144e-18, ], [ 0.00000000e00, -5.49032632e-19, 0.00000000e00, 0.00000000e00, -2.11513946e-16, ], [ 0.00000000e00, 9.77881196e-17, -2.21495572e-16, -2.11513946e-16, 0.00000000e00, ], ], ], ] ) axis = 0 m = flow.nn.Softmax(dim=axis) x = flow.Tensor( softmax_input_arr, requires_grad=True, device=flow.device(device), dtype=flow.float64, ) y = m(x).sum() y.backward() test_case.assertTrue(np.allclose(x.grad.numpy(), x_grad, 1e-5, 1e-5)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestHardsigmoidModule(flow.unittest.TestCase): def test_hardsigmoid(test_case): m = flow.nn.Hardsigmoid() arr = np.random.randn(2, 3, 4, 5) np_out = np.maximum(0, np.minimum(1, (arr + 3) / 6)) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 
rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestSoftmax(flow.unittest.TestCase): def test_softmax(test_case): arg_dict = OrderedDict() arg_dict["fun"] = [ _test_softmax, _test_softmax_dim_1, _test_softmax_dim_2, _test_softmax_dim_3, _test_softmax_backward, ] arg_dict["device"] = ["cpu", "cuda"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) def _test_logsoftmax(test_case, device): dim = 1 m = flow.nn.LogSoftmax(dim) input_arr = np.random.randn(4, 7) x = flow.Tensor(input_arr, device=flow.device(device)) y = m(x) output = numpy_logsoftmax(input_arr, dim) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) def _test_logsoftmax_dim_2(test_case, device): dim = 2 m = flow.nn.LogSoftmax(dim) input_arr = np.random.randn(3, 4, 5) x = flow.Tensor(input_arr, device=flow.device(device)) y = m(x) output = numpy_logsoftmax(input_arr, dim) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) def _test_logsoftmax_dim_3(test_case, device): dim = 3 m = flow.nn.LogSoftmax(dim) input_arr = np.random.randn(8, 9, 7, 3) x = flow.Tensor(input_arr, device=flow.device(device)) y = m(x) output = numpy_logsoftmax(input_arr, dim) test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05)) def _test_logsoftmax_backward(test_case, device): axis = 0 m = flow.nn.LogSoftmax(axis) x = flow.Tensor( softmax_input_arr, requires_grad=True, device=flow.device(device), dtype=flow.float64, ) x_grad = np.array( [ [ [ [0.46211716, 0.96402758, -0.99505475, -0.76159416, 0.90514825], [0.96402758, -0.96402758, -0.46211716, 0.76159416, 0.46211716], [0.46211716, -0.99505475, 0.90514825, -0.76159416, 0.99932930], [0.00000000, 0.90514825, -0.90514825, -0.90514825, -0.96402758], ], [ [0.99505475, 0.96402758, 0.76159416, -0.76159416, 0.46211716], [0.00000000, -0.90514825, 0.90514825, -0.46211716, -0.46211716], [0.46211716, -0.96402758, 0.00000000, 0.90514825, 0.46211716], [0.96402758, 0.46211716, 0.90514825, 0.99817790, 0.98661430], ], [ [-0.76159416, 0.96402758, -0.46211716, 0.00000000, 0.76159416], [-0.96402758, -0.96402758, 0.46211716, 0.46211716, -0.98661430], [0.46211716, -0.99505475, 0.96402758, 0.46211716, 0.90514825], [-0.96402758, 0.76159416, 0.99505475, 0.90514825, -0.96402758], ], ], [ [ [-0.46211716, -0.96402758, 0.99505475, 0.76159416, -0.90514825], [-0.96402758, 0.96402758, 0.46211716, -0.76159416, -0.46211716], [-0.46211716, 0.99505475, -0.90514825, 0.76159416, -0.99932930], [0.00000000, -0.90514825, 0.90514825, 0.90514825, 0.96402758], ], [ [-0.99505475, -0.96402758, -0.76159416, 0.76159416, -0.46211716], [0.00000000, 0.90514825, -0.90514825, 0.46211716, 0.46211716], [-0.46211716, 0.96402758, 0.00000000, -0.90514825, -0.46211716], [-0.96402758, -0.46211716, -0.90514825, -0.99817790, -0.98661430], ], [ [0.76159416, -0.96402758, 0.46211716, 0.00000000, -0.76159416], [0.96402758, 0.96402758, -0.46211716, -0.46211716, 0.98661430], [-0.46211716, 0.99505475, -0.96402758, -0.46211716, -0.90514825], [0.96402758, -0.76159416, -0.99505475, -0.90514825, 0.96402758], ], ], ] ) y = m(x) z = y.sum() z.backward() test_case.assertTrue(np.allclose(x.grad.numpy(), x_grad, 1e-5, 1e-5)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestLogSoftmax(flow.unittest.TestCase): def test_log_softmax(test_case): arg_dict = OrderedDict() arg_dict["fun"] = [ _test_logsoftmax, _test_logsoftmax_dim_2, _test_logsoftmax_dim_3, _test_logsoftmax_backward, ] 
arg_dict["device"] = ["cpu", "cuda"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestLogSigmoidModule(flow.unittest.TestCase): def test_logsigmoid(test_case): m = flow.nn.LogSigmoid() arr = np.random.randn(2, 3, 4, 5) np_out = np.log(1.0 / (1.0 + np.exp(-arr))) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestSoftplusModule(flow.unittest.TestCase): def test_softplus(test_case): m = flow.nn.Softplus() arr = np.random.randn(2, 3, 4, 5) np_out = np.where(arr > 20, arr, np.log(1.0 + np.exp(1.0 * arr))) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) def test_softplus_beta(test_case): m = flow.nn.Softplus(beta=1.11) arr = np.random.randn(2, 3, 4, 5) np_out = np.where( arr * 1.11 > 20, arr, 1.0 / 1.11 * np.log(1.0 + np.exp(1.11 * arr)) ) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) def test_softplus_threshold(test_case): m = flow.nn.Softplus(beta=1.11, threshold=1.55) arr = np.random.randn(2, 3, 4, 5) np_out = np.where( arr * 1.11 > 1.55, arr, 1.0 / 1.11 * np.log(1.0 + np.exp(1.11 * arr)) ) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestHardswishModule(flow.unittest.TestCase): def test_hardswish(test_case): m = flow.nn.Hardswish() arr = np.random.randn(2, 3, 4, 5) f = arr + 3 relu6 = np.where(np.where(f < 0, 0, f) > 6, 6, np.where(f < 0, 0, f)) np_out = arr * relu6 / 6 x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestHardtanhModule(flow.unittest.TestCase): def test_hardtanh(test_case): m = flow.nn.Hardtanh() arr = np.random.randn(2, 3, 4, 5) np_out = np.maximum(-1, np.minimum(1, arr)) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) def test_hardtanh_min_max(test_case): m = flow.nn.Hardtanh(min_val=-2.0, max_val=2.3) arr = np.random.randn(2, 3, 4, 5) np_out = np.maximum(-2.0, np.minimum(2.3, arr)) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestLeakyReLUModule(flow.unittest.TestCase): def test_leaky_relu(test_case): negative_slope = 0.2 m = flow.nn.LeakyReLU(negative_slope=negative_slope) arr = np.random.randn(2, 3, 4, 5) np_out = np.maximum(0, arr) + negative_slope * np.minimum(0, arr) x = flow.Tensor(arr) of_out = m(x) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, rtol=1e-05)) if __name__ == "__main__": unittest.main()
[ "oneflow.experimental.nn.Hardswish", "oneflow.experimental.nn.GELU", "oneflow.experimental.nn.ReLU6", "oneflow.experimental.nn.ReLU", "oneflow.experimental.nn.LogSigmoid", "oneflow.experimental.nn.Softmax", "oneflow.experimental.unittest.env.eager_execution_enabled", "oneflow.experimental.nn.Hardsigmoid", "oneflow.experimental.gelu", "oneflow.experimental.nn.ELU", "oneflow.experimental.Tensor", "oneflow.experimental.device", "oneflow.experimental.nn.LogSoftmax", "oneflow.experimental.sigmoid", "oneflow.experimental.nn.Softplus", "oneflow.experimental.nn.Sigmoid", "oneflow.experimental.tanh", "oneflow.experimental.nn.Hardtanh", "oneflow.experimental.nn.Tanh", "oneflow.experimental.nn.LeakyReLU" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow from util import convert_to_onnx_and_check def test_matmul(test_case): @flow.global_function() def matmul(): a = flow.get_variable( name="a", shape=(2, 3), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) b = flow.get_variable( name="b", shape=(3, 4), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) return flow.matmul(a, b) convert_to_onnx_and_check(matmul) def test_matmul_ta(test_case): @flow.global_function() def matmul(): a = flow.get_variable( name="a", shape=(3, 2), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) b = flow.get_variable( name="b", shape=(3, 4), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) return flow.matmul(a, b, transpose_a=True) convert_to_onnx_and_check(matmul) def test_matmul_tb(test_case): @flow.global_function() def matmul(): a = flow.get_variable( name="a", shape=(2, 3), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) b = flow.get_variable( name="b", shape=(4, 3), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) return flow.matmul(a, b, transpose_b=True) convert_to_onnx_and_check(matmul) def test_matmul_ta_tb(test_case): @flow.global_function() def matmul(): a = flow.get_variable( name="a", shape=(3, 2), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) b = flow.get_variable( name="b", shape=(4, 3), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) return flow.matmul(a, b, transpose_a=True, transpose_b=True) convert_to_onnx_and_check(matmul) def test_batch_matmul(test_case): @flow.global_function() def matmul(): a = flow.get_variable( name="a", shape=(4, 2, 3), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) b = flow.get_variable( name="b", shape=(4, 3, 4), dtype=flow.float, initializer=flow.random_uniform_initializer(), ) return flow.matmul(a, b) convert_to_onnx_and_check(matmul)
[ "oneflow.matmul", "oneflow.global_function", "oneflow.random_uniform_initializer" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np from test_util import GenArgList import oneflow as flow import oneflow.unittest def _test_arange(test_case, device): np_out = np.arange(13, dtype=np.float32) of_out = flow.arange(13, device=device, dtype=flow.float32) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05)) def _test_arange_step_prarm(test_case, device): np_out = np.arange(0, 20, 2) of_out = flow.arange(0, 20, step=2, device=device) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05)) def _test_arange_more_params(test_case, device): np_out = np.arange(0, 100, 3) of_out = flow.arange(start=0, end=100, step=3, device=device) test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05)) def _test_arange_backward(test_case, device): np_out = np.arange(13) x = flow.arange(13, device=device) x.requires_grad = True y = x.sum() y.backward() test_case.assertTrue(np.allclose(x.grad.numpy(), np.ones(13), 1e-05, 1e-05)) @flow.unittest.skip_unless_1n1d() class TestArange(flow.unittest.TestCase): def test_transpose(test_case): arg_dict = OrderedDict() arg_dict["function_test"] = [ _test_arange, _test_arange_step_prarm, _test_arange_more_params, _test_arange_backward, ] arg_dict["device"] = ["cpu", "cuda"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) if __name__ == "__main__": unittest.main()
[ "oneflow.arange", "oneflow.unittest.skip_unless_1n1d" ]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import OrderedDict import numpy as np import oneflow as flow import tensorflow as tf from test_util import GenArgList import oneflow.typing as oft import unittest import os gpus = tf.config.experimental.list_physical_devices("GPU") for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) def _random_inputs( params_shape, indices_shape, updates_shape, allow_duplicate_index=True ): params = np.random.rand(*params_shape).astype(np.float32) updates = np.random.rand(*updates_shape).astype(np.float32) indices = [] indices_rows = np.prod(indices_shape[:-1]) indices_cols = indices_shape[-1] for col in range(indices_cols): if allow_duplicate_index is False and indices_rows <= params_shape[col]: rand_indices = np.arange(params_shape[col], dtype=np.int32) np.random.shuffle(rand_indices) indices_col = rand_indices[:indices_rows].reshape(indices_shape[:-1]) else: indices_col = np.random.randint( low=0, high=params_shape[col], size=(indices_rows,), dtype=np.int32 ).reshape(indices_shape[:-1]) indices.append(indices_col) indices = np.stack(indices, axis=len(indices_shape) - 1) if allow_duplicate_index is False: existing_nd_index_set = set() for nd_index in indices.reshape(-1, indices.shape[-1]): nd_index_str = "(" + ",".join(map(str, nd_index)) + ")" assert ( nd_index_str not in existing_nd_index_set ), "random generated duplicate nd index {}".format(nd_index_str) existing_nd_index_set.add(nd_index_str) return params, updates, indices def _make_scatter_nd_fn(indices, updates, shape, device_type, mirrored, compare_fn): flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) if mirrored: func_config.default_logical_view(flow.scope.mirrored_view()) else: func_config.default_logical_view(flow.scope.consistent_view()) def do_scatter_nd(indices_blob, updates_blob): with flow.scope.placement(device_type, "0:0"): x = flow.get_variable( "updates", shape=updates.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), ) x = flow.cast_to_current_logical_view(x) x = x + updates_blob y = flow.scatter_nd(indices_blob, x, shape) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(y) flow.watch_diff(x, compare_fn) return y if mirrored: @flow.global_function(type="train", function_config=func_config) def scatter_nd_fn( indices_def: oft.ListNumpy.Placeholder(indices.shape, dtype=flow.int32), updates_def: oft.ListNumpy.Placeholder(updates.shape, dtype=flow.float), ): return do_scatter_nd(indices_def, updates_def) else: @flow.global_function(type="train", function_config=func_config) def scatter_nd_fn( indices_def: oft.Numpy.Placeholder(indices.shape, dtype=flow.int32), updates_def: oft.Numpy.Placeholder(updates.shape, dtype=flow.float), ): return do_scatter_nd(indices_def, updates_def) return scatter_nd_fn def _compare_scatter_nd_with_tf( test_case, device_type, params_shape, indices_shape, updates_shape, 
mirrored=False, verbose=False, ): _, updates, indices = _random_inputs(params_shape, indices_shape, updates_shape) indices_const = tf.constant(indices) with tf.GradientTape() as t: x = tf.Variable(updates) y = tf.scatter_nd(indices_const, x, params_shape) dy_dx = t.gradient(y, x) if mirrored: def compare_dy(params_grad): test_case.assertTrue( np.array_equal(dy_dx.numpy(), params_grad.numpy_list()[0]) ) else: def compare_dy(params_grad): test_case.assertTrue(np.array_equal(dy_dx.numpy(), params_grad.numpy())) scatter_nd_fn = _make_scatter_nd_fn( indices, updates, params_shape, device_type, mirrored, compare_dy ) check_point = flow.train.CheckPoint() check_point.init() if mirrored: of_y = scatter_nd_fn([indices], [updates]).get().numpy_list()[0] else: of_y = scatter_nd_fn(indices, updates).get().numpy() if verbose is True: print("device_type:", device_type) print("indices:", indices) print("updates:", updates) print("tf_params:", y.numpy()) print("of_params:", of_y) test_case.assertTrue(np.allclose(y.numpy(), of_y)) def _compare_scatter_nd_update_with_tf( test_case, device_type, params_shape, indices_shape, updates_shape, allow_duplicate_index=False, verbose=False, ): params, updates, indices = _random_inputs( params_shape, indices_shape, updates_shape, allow_duplicate_index ) x_const = tf.constant(params) y_const = tf.constant(updates) i_const = tf.constant(indices) with tf.GradientTape() as t1: x = tf.Variable(params) z1 = tf.tensor_scatter_nd_update(x, i_const, y_const) dz_dx = t1.gradient(z1, x) with tf.GradientTape() as t2: y = tf.Variable(updates) z2 = tf.tensor_scatter_nd_update(x_const, i_const, y) dz_dy = t2.gradient(z2, y) test_case.assertTrue(np.allclose(z1.numpy(), z2.numpy())) def compare_dz_dx(params_grad): test_case.assertTrue(np.allclose(dz_dx.numpy(), params_grad.numpy())) def compare_dz_dy(updates_grad): test_case.assertTrue(np.allclose(dz_dy.numpy(), updates_grad.numpy())) flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) func_config.default_logical_view(flow.scope.consistent_view()) @flow.global_function(type="train", function_config=func_config) def scatter_nd_update_grad_fn( x_def: oft.Numpy.Placeholder(params.shape, dtype=flow.float), indices_def: oft.Numpy.Placeholder(indices.shape, dtype=flow.int32), y_def: oft.Numpy.Placeholder(updates.shape, dtype=flow.float), ): with flow.scope.placement(device_type, "0:0"): x = flow.get_variable( "params", shape=params.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), ) y = flow.get_variable( "updates", shape=updates.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), ) x = x + x_def y = y + y_def z = flow.tensor_scatter_nd_update(x, indices_def, y) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(z) flow.watch_diff(x, compare_dz_dx) flow.watch_diff(y, compare_dz_dy) return z check_point = flow.train.CheckPoint() check_point.init() of_z = scatter_nd_update_grad_fn(params, indices, updates).get() if verbose is True: print("device_type:", device_type) print("x:", params) print("y:", updates) print("indices:", indices) print("tf_z:", z1.numpy()) print("of_z:", of_z.numpy()) test_case.assertTrue(np.allclose(z1.numpy(), of_z.numpy())) def _of_tensor_scatter_nd_add( params, indices, updates, device_type, mirrored, params_grad_watcher, updates_grad_watcher, ): flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) def 
    def do_tensor_scatter_nd_add(params_blob, indices_blob, updates_blob):
        with flow.scope.placement(device_type, "0:0"):
            params_var = flow.get_variable(
                "params",
                shape=params_blob.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            updates_var = flow.get_variable(
                "updates",
                shape=updates_blob.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            params_var = flow.cast_to_current_logical_view(params_var)
            params_blob = flow.cast_to_current_logical_view(params_blob)
            updates_blob = flow.cast_to_current_logical_view(updates_blob)
            updates_var = flow.cast_to_current_logical_view(updates_var)
            params_var = params_var + params_blob
            updates_var = updates_var + updates_blob
            out = flow.tensor_scatter_nd_add(params_var, indices_blob, updates_var)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
            ).minimize(out)
        flow.watch_diff(params_var, params_grad_watcher)
        flow.watch_diff(updates_var, updates_grad_watcher)
        return out

    if mirrored:
        func_config.default_logical_view(flow.scope.mirrored_view())

        @flow.global_function(type="train", function_config=func_config)
        def tensor_scatter_nd_add_fn(
            params_def: oft.ListNumpy.Placeholder(params.shape, dtype=flow.float),
            indices_def: oft.ListNumpy.Placeholder(indices.shape, dtype=flow.int32),
            updates_def: oft.ListNumpy.Placeholder(updates.shape, dtype=flow.float),
        ):
            return do_tensor_scatter_nd_add(params_def, indices_def, updates_def)

        check_point = flow.train.CheckPoint()
        check_point.init()
        return (
            tensor_scatter_nd_add_fn([params], [indices], [updates])
            .get()
            .numpy_list()[0]
        )

    else:
        func_config.default_logical_view(flow.scope.consistent_view())

        @flow.global_function(type="train", function_config=func_config)
        def tensor_scatter_nd_add_fn(
            params_def: oft.Numpy.Placeholder(params.shape, dtype=flow.float),
            indices_def: oft.Numpy.Placeholder(indices.shape, dtype=flow.int32),
            updates_def: oft.Numpy.Placeholder(updates.shape, dtype=flow.float),
        ):
            return do_tensor_scatter_nd_add(params_def, indices_def, updates_def)

        check_point = flow.train.CheckPoint()
        check_point.init()
        return tensor_scatter_nd_add_fn(params, indices, updates).get().numpy()


def _compare_tensor_scatter_nd_add_with_tf(
    test_case, params_shape, indices_shape, updates_shape, device_type, mirrored
):
    """Check OneFlow tensor_scatter_nd_add output and gradients against TensorFlow."""
    params, updates, indices = _random_inputs(
        params_shape, indices_shape, updates_shape, True
    )
    params_const = tf.constant(params)
    indices_const = tf.constant(indices)
    updates_const = tf.constant(updates)
    with tf.GradientTape() as t1:
        params_var = tf.Variable(params)
        tf_out1 = tf.tensor_scatter_nd_add(params_var, indices_const, updates_const)
    tf_params_grad = t1.gradient(tf_out1, params_var)
    with tf.GradientTape() as t2:
        updates_var = tf.Variable(updates)
        tf_out2 = tf.tensor_scatter_nd_add(params_const, indices_const, updates_var)
    tf_updates_grad = t2.gradient(tf_out2, updates_var)
    test_case.assertTrue(np.allclose(tf_out1.numpy(), tf_out2.numpy()))

    def compare_params_grad(of_params_grad):
        tf_params_grad_np = tf_params_grad.numpy()
        of_params_grad_np = (
            of_params_grad.numpy_list()[0] if mirrored else of_params_grad.numpy()
        )
        test_case.assertTrue(np.allclose(tf_params_grad_np, of_params_grad_np))

    def compare_updates_grad(of_updates_grad):
        tf_updates_grad_np = tf_updates_grad.numpy()
        of_updates_grad_np = (
            of_updates_grad.numpy_list()[0] if mirrored else of_updates_grad.numpy()
        )
        test_case.assertTrue(np.allclose(tf_updates_grad_np, of_updates_grad_np))

    of_out = _of_tensor_scatter_nd_add(
        params,
        indices,
        updates,
        device_type,
        mirrored,
        compare_params_grad,
        compare_updates_grad,
    )
    test_case.assertTrue(np.allclose(tf_out1.numpy(), of_out))


def _of_scatter_nd_dynamic_indices(
    indices, updates, indices_static_shape, updates_static_shape, params_shape
):
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def scatter_nd_fn(
        indices_def: oft.ListNumpy.Placeholder(indices_static_shape, dtype=flow.int32),
        updates_def: oft.ListNumpy.Placeholder(updates_static_shape, dtype=flow.float),
    ):
        with flow.scope.placement("gpu", "0:0"):
            return flow.scatter_nd(indices_def, updates_def, params_shape)

    return scatter_nd_fn([indices], [updates]).get().numpy_list()[0]


def _compare_scatter_nd_dynamic_indices_with_tf(
    test_case,
    indices_shape,
    updates_shape,
    indices_static_shape,
    updates_static_shape,
    params_shape,
):
    _, updates, indices = _random_inputs(params_shape, indices_shape, updates_shape)

    indices_const = tf.constant(indices)
    x = tf.Variable(updates)
    y = tf.scatter_nd(indices_const, x, params_shape)

    of_y = _of_scatter_nd_dynamic_indices(
        indices, updates, indices_static_shape, updates_static_shape, params_shape
    )
    test_case.assertTrue(np.allclose(y.numpy(), of_y))


def _of_tensor_scatter_nd_update_dynamic_indices(
    params, indices, updates, indices_static_shape, updates_static_shape
):
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def tensor_scatter_nd_update_fn(
        params_def: oft.ListNumpy.Placeholder(params.shape, dtype=flow.float),
        indices_def: oft.ListNumpy.Placeholder(indices_static_shape, dtype=flow.int32),
        updates_def: oft.ListNumpy.Placeholder(updates_static_shape, dtype=flow.float),
    ):
        with flow.scope.placement("gpu", "0:0"):
            return flow.tensor_scatter_nd_update(params_def, indices_def, updates_def)

    return (
        tensor_scatter_nd_update_fn([params], [indices], [updates])
        .get()
        .numpy_list()[0]
    )


def _compare_tensor_scatter_nd_update_dynamic_indices_with_tf(
    test_case,
    params_shape,
    indices_shape,
    updates_shape,
    indices_static_shape,
    updates_static_shape,
):
    params, updates, indices = _random_inputs(
        params_shape, indices_shape, updates_shape, False
    )

    i = tf.constant(indices)
    x = tf.Variable(params)
    y = tf.Variable(updates)
    z = tf.tensor_scatter_nd_update(x, i, y)

    of_z = _of_tensor_scatter_nd_update_dynamic_indices(
        params, indices, updates, indices_static_shape, updates_static_shape
    )
    test_case.assertTrue(np.allclose(z.numpy(), of_z))


def _of_tensor_scatter_nd_add_dynamic_indices(
    params, indices, updates, indices_static_shape, updates_static_shape
):
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def tensor_scatter_nd_add_fn(
        params_def: oft.ListNumpy.Placeholder(params.shape, dtype=flow.float),
        indices_def: oft.ListNumpy.Placeholder(indices_static_shape, dtype=flow.int32),
        updates_def: oft.ListNumpy.Placeholder(updates_static_shape, dtype=flow.float),
    ):
        with flow.scope.placement("gpu", "0:0"):
            return flow.tensor_scatter_nd_add(params_def, indices_def, updates_def)

    return (
        tensor_scatter_nd_add_fn([params], [indices], [updates]).get().numpy_list()[0]
    )


def _compare_tensor_scatter_nd_add_dynamic_indices_with_tf(
    test_case,
    params_shape,
    indices_shape,
    updates_shape,
    indices_static_shape,
    updates_static_shape,
):
    params, updates, indices = _random_inputs(
        params_shape, indices_shape, updates_shape
    )

    i = tf.constant(indices)
    x = tf.Variable(params)
    y = tf.Variable(updates)
    z = tf.tensor_scatter_nd_add(x, i, y)

    of_z = _of_tensor_scatter_nd_add_dynamic_indices(
        params, indices, updates, indices_static_shape, updates_static_shape
    )
    test_case.assertTrue(np.allclose(z.numpy(), of_z))


@flow.unittest.skip_unless_1n1d()
class TestScatterNd(flow.unittest.TestCase):
    def test_scatter_nd(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["params_shape"] = [(10,)]
        arg_dict["indices_shape"] = [(5, 1)]
        arg_dict["updates_shape"] = [(5,)]
        arg_dict["mirrored"] = [True, False]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_with_tf(test_case, *arg)

    def test_scatter_nd_case_1(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu"]
        arg_dict["params_shape"] = [(128,)]
        arg_dict["indices_shape"] = [(100, 1)]
        arg_dict["updates_shape"] = [(100,)]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_with_tf(test_case, *arg)

    def test_scatter_nd_case_2(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu"]
        arg_dict["params_shape"] = [(32, 16, 4)]
        arg_dict["indices_shape"] = [(50, 2)]
        arg_dict["updates_shape"] = [(50, 4)]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_with_tf(test_case, *arg)

    def test_scatter_nd_case_3(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu"]
        arg_dict["params_shape"] = [(24, 25, 32, 10, 12)]
        arg_dict["indices_shape"] = [(3, 4, 2)]
        arg_dict["updates_shape"] = [(3, 4, 32, 10, 12)]
        arg_dict["mirrored"] = [True, False]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_with_tf(test_case, *arg)

    def test_scatter_nd_case_4(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu"]
        arg_dict["params_shape"] = [(8,)]
        arg_dict["indices_shape"] = [(12, 1)]
        arg_dict["updates_shape"] = [(12,)]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_with_tf(test_case, *arg)

    def test_scatter_nd_update(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["params_shape"] = [(10,)]
        arg_dict["indices_shape"] = [(5, 1)]
        arg_dict["updates_shape"] = [(5,)]
        arg_dict["allow_duplicate_index"] = [False]
        # arg_dict["verbose"] = [True]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_update_with_tf(test_case, *arg)

    def test_scatter_nd_update_case_1(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu"]
        arg_dict["params_shape"] = [(256, 64)]
        arg_dict["indices_shape"] = [(128, 2)]
        arg_dict["updates_shape"] = [(128,)]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_update_with_tf(test_case, *arg)

    def test_scatter_nd_update_case_2(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu"]
        arg_dict["params_shape"] = [(20, 10, 11, 3, 5)]
        arg_dict["indices_shape"] = [(2, 4, 3)]
        arg_dict["updates_shape"] = [(2, 4, 3, 5)]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_update_with_tf(test_case, *arg)

    def test_scatter_nd_update_case_3(test_case):
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["params_shape"] = [(256, 4)]
        arg_dict["indices_shape"] = [(10, 25, 1)]
        arg_dict["updates_shape"] = [(10, 25, 4)]
        for arg in GenArgList(arg_dict):
            _compare_scatter_nd_update_with_tf(test_case, *arg)

    def test_tensor_scatter_nd_add(test_case):
        arg_dict = OrderedDict()
        arg_dict["params_shape"] = [(12,)]
        arg_dict["indices_shape"] = [(7, 1)]
arg_dict["updates_shape"] = [(7,)] arg_dict["device_type"] = ["gpu", "cpu"] arg_dict["mirrored"] = [True, False] for arg in GenArgList(arg_dict): _compare_tensor_scatter_nd_add_with_tf(test_case, *arg) def test_tensor_scatter_nd_add_case1(test_case): arg_dict = OrderedDict() arg_dict["params_shape"] = [(38, 66, 9)] arg_dict["indices_shape"] = [(17, 2)] arg_dict["updates_shape"] = [(17, 9)] arg_dict["device_type"] = ["gpu", "cpu"] arg_dict["mirrored"] = [True, False] for arg in GenArgList(arg_dict): _compare_tensor_scatter_nd_add_with_tf(test_case, *arg) def test_tensor_scatter_nd_add_case2(test_case): arg_dict = OrderedDict() arg_dict["params_shape"] = [(2, 7, 19, 41, 33)] arg_dict["indices_shape"] = [(20, 9, 3)] arg_dict["updates_shape"] = [(20, 9, 41, 33)] arg_dict["device_type"] = ["gpu", "cpu"] arg_dict["mirrored"] = [True, False] for arg in GenArgList(arg_dict): _compare_tensor_scatter_nd_add_with_tf(test_case, *arg) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_scatter_nd_dynamic_indices(test_case): arg_dict = OrderedDict() arg_dict["indices_shape"] = [(12, 10, 2)] arg_dict["updates_shape"] = [(12, 10, 41, 33)] arg_dict["indices_static_shape"] = [(15, 10, 2)] arg_dict["updates_static_shape"] = [(15, 10, 41, 33)] arg_dict["params_shape"] = [(64, 22, 41, 33)] for arg in GenArgList(arg_dict): _compare_scatter_nd_dynamic_indices_with_tf(test_case, *arg) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_scatter_nd_empty_indices(test_case): arg_dict = OrderedDict() arg_dict["indices_shape"] = [(0, 1)] arg_dict["updates_shape"] = [(0, 14)] arg_dict["indices_static_shape"] = [(8, 1)] arg_dict["updates_static_shape"] = [(8, 14)] arg_dict["params_shape"] = [(10, 14)] for arg in GenArgList(arg_dict): _compare_scatter_nd_dynamic_indices_with_tf(test_case, *arg) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_tensor_scatter_nd_update_dynamic_indices(test_case): arg_dict = OrderedDict() arg_dict["params_shape"] = [(32, 33, 4, 5)] arg_dict["indices_shape"] = [(12, 2)] arg_dict["updates_shape"] = [(12, 4, 5)] arg_dict["indices_static_shape"] = [(14, 2)] arg_dict["updates_static_shape"] = [(14, 4, 5)] for arg in GenArgList(arg_dict): _compare_tensor_scatter_nd_update_dynamic_indices_with_tf(test_case, *arg) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_tensor_scatter_nd_update_empty_indices(test_case): arg_dict = OrderedDict() arg_dict["params_shape"] = [(37, 14)] arg_dict["indices_shape"] = [(7, 0, 1)] arg_dict["updates_shape"] = [(7, 0, 14)] arg_dict["indices_static_shape"] = [(7, 5, 1)] arg_dict["updates_static_shape"] = [(7, 5, 14)] for arg in GenArgList(arg_dict): _compare_tensor_scatter_nd_update_dynamic_indices_with_tf(test_case, *arg) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_tensor_scatter_nd_add_dynamic_indices(test_case): arg_dict = OrderedDict() arg_dict["params_shape"] = [(2, 9, 7, 5, 4)] arg_dict["indices_shape"] = [(12, 5, 3)] arg_dict["updates_shape"] = [(12, 5, 5, 4)] arg_dict["indices_static_shape"] = [(15, 6, 3)] arg_dict["updates_static_shape"] = [(15, 6, 5, 4)] for arg in GenArgList(arg_dict): _compare_tensor_scatter_nd_add_dynamic_indices_with_tf(test_case, *arg) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_tensor_scatter_nd_add_empty_indices(test_case): arg_dict = OrderedDict() arg_dict["params_shape"] = [(24, 30, 14)] arg_dict["indices_shape"] = 
        arg_dict["indices_shape"] = [(0, 2)]
        arg_dict["updates_shape"] = [(0, 14)]
        arg_dict["indices_static_shape"] = [(11, 2)]
        arg_dict["updates_static_shape"] = [(11, 14)]
        for arg in GenArgList(arg_dict):
            _compare_tensor_scatter_nd_add_dynamic_indices_with_tf(test_case, *arg)


if __name__ == "__main__":
    unittest.main()
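
# The following is an illustrative NumPy sketch (not part of the original test
# file) of the scatter_nd semantics the tests above verify;
# `scatter_nd_reference` is a hypothetical helper mirroring tf.scatter_nd, the
# ground truth these comparisons use. Each row of `indices` names a multi-index
# into a zero tensor of the requested shape, and the matching row of `updates`
# is summed in there, so duplicate indices accumulate.
import numpy as np


def scatter_nd_reference(indices, updates, shape):
    out = np.zeros(shape, dtype=updates.dtype)
    flat_indices = indices.reshape(-1, indices.shape[-1])
    flat_updates = updates.reshape((-1,) + updates.shape[indices.ndim - 1 :])
    for idx, upd in zip(flat_indices, flat_updates):
        out[tuple(idx)] += upd  # duplicates accumulate, as with tf.scatter_nd
    return out


# Example: index 0 appears twice, so its updates (1.0 and 3.0) are summed.
# scatter_nd_reference(np.array([[0], [2], [0]], dtype=np.int32),
#                      np.array([1.0, 2.0, 3.0], dtype=np.float32),
#                      (4,))  -> [4., 0., 2., 0.]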
[ "oneflow.FunctionConfig", "oneflow.scatter_nd", "oneflow.scope.consistent_view", "oneflow.tensor_scatter_nd_update", "oneflow.optimizer.PiecewiseConstantScheduler", "oneflow.global_function", "oneflow.typing.Numpy.Placeholder", "oneflow.scope.mirrored_view", "oneflow.tensor_scatter_nd_add", "oneflow.cast_to_current_logical_view", "oneflow.typing.ListNumpy.Placeholder", "oneflow.constant_initializer", "oneflow.train.CheckPoint", "oneflow.scope.placement", "oneflow.unittest.skip_unless_1n1d", "oneflow.watch_diff", "oneflow.clear_default_session" ]
[((789, 840), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (833, 840), True, 'import tensorflow as tf\n'), ((17428, 17460), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (17458, 17460), True, 'import oneflow as flow\n'), ((862, 913), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (902, 913), True, 'import tensorflow as tf\n'), ((1176, 1203), 'numpy.prod', 'np.prod', (['indices_shape[:-1]'], {}), '(indices_shape[:-1])\n', (1183, 1203), True, 'import numpy as np\n'), ((2384, 2412), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2410, 2412), True, 'import oneflow as flow\n'), ((2431, 2452), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2450, 2452), True, 'import oneflow as flow\n'), ((4318, 4338), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (4329, 4338), True, 'import tensorflow as tf\n'), ((4947, 4970), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (4968, 4970), True, 'import oneflow as flow\n'), ((5750, 5769), 'tensorflow.constant', 'tf.constant', (['params'], {}), '(params)\n', (5761, 5769), True, 'import tensorflow as tf\n'), ((5784, 5804), 'tensorflow.constant', 'tf.constant', (['updates'], {}), '(updates)\n', (5795, 5804), True, 'import tensorflow as tf\n'), ((5819, 5839), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (5830, 5839), True, 'import tensorflow as tf\n'), ((6460, 6488), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (6486, 6488), True, 'import oneflow as flow\n'), ((6507, 6528), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (6526, 6528), True, 'import oneflow as flow\n'), ((6648, 6711), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (6668, 6711), True, 'import oneflow as flow\n'), ((7817, 7840), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (7838, 7840), True, 'import oneflow as flow\n'), ((8390, 8418), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (8416, 8418), True, 'import oneflow as flow\n'), ((8437, 8458), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (8456, 8458), True, 'import oneflow as flow\n'), ((11539, 11558), 'tensorflow.constant', 'tf.constant', (['params'], {}), '(params)\n', (11550, 11558), True, 'import tensorflow as tf\n'), ((11579, 11599), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (11590, 11599), True, 'import tensorflow as tf\n'), ((11620, 11640), 'tensorflow.constant', 'tf.constant', (['updates'], {}), '(updates)\n', (11631, 11640), True, 'import tensorflow as tf\n'), ((13138, 13166), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (13164, 13166), True, 'import oneflow as flow\n'), ((13185, 13206), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (13204, 13206), True, 'import oneflow as flow\n'), ((13324, 13373), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (13344, 13373), True, 'import oneflow as flow\n'), ((14057, 14077), 'tensorflow.constant', 'tf.constant', (['indices'], {}), 
'(indices)\n', (14068, 14077), True, 'import tensorflow as tf\n'), ((14086, 14106), 'tensorflow.Variable', 'tf.Variable', (['updates'], {}), '(updates)\n', (14097, 14106), True, 'import tensorflow as tf\n'), ((14115, 14160), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['indices_const', 'x', 'params_shape'], {}), '(indices_const, x, params_shape)\n', (14128, 14160), True, 'import tensorflow as tf\n'), ((14481, 14509), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (14507, 14509), True, 'import oneflow as flow\n'), ((14528, 14549), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (14547, 14549), True, 'import oneflow as flow\n'), ((14667, 14716), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (14687, 14716), True, 'import oneflow as flow\n'), ((15591, 15611), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (15602, 15611), True, 'import tensorflow as tf\n'), ((15620, 15639), 'tensorflow.Variable', 'tf.Variable', (['params'], {}), '(params)\n', (15631, 15639), True, 'import tensorflow as tf\n'), ((15648, 15668), 'tensorflow.Variable', 'tf.Variable', (['updates'], {}), '(updates)\n', (15659, 15668), True, 'import tensorflow as tf\n'), ((15677, 15713), 'tensorflow.tensor_scatter_nd_update', 'tf.tensor_scatter_nd_update', (['x', 'i', 'y'], {}), '(x, i, y)\n', (15704, 15713), True, 'import tensorflow as tf\n'), ((16039, 16067), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (16065, 16067), True, 'import oneflow as flow\n'), ((16086, 16107), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (16105, 16107), True, 'import oneflow as flow\n'), ((16225, 16274), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (16245, 16274), True, 'import oneflow as flow\n'), ((17112, 17132), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (17123, 17132), True, 'import tensorflow as tf\n'), ((17141, 17160), 'tensorflow.Variable', 'tf.Variable', (['params'], {}), '(params)\n', (17152, 17160), True, 'import tensorflow as tf\n'), ((17169, 17189), 'tensorflow.Variable', 'tf.Variable', (['updates'], {}), '(updates)\n', (17180, 17189), True, 'import tensorflow as tf\n'), ((17198, 17231), 'tensorflow.tensor_scatter_nd_add', 'tf.tensor_scatter_nd_add', (['x', 'i', 'y'], {}), '(x, i, y)\n', (17222, 17231), True, 'import tensorflow as tf\n'), ((25625, 25640), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25638, 25640), False, 'import unittest\n'), ((3270, 3300), 'oneflow.watch_diff', 'flow.watch_diff', (['x', 'compare_fn'], {}), '(x, compare_fn)\n', (3285, 3300), True, 'import oneflow as flow\n'), ((3346, 3409), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (3366, 3409), True, 'import oneflow as flow\n'), ((3698, 3761), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (3718, 3761), True, 'import oneflow as flow\n'), ((4348, 4365), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4363, 4365), True, 'import tensorflow as tf\n'), ((4384, 4404), 'tensorflow.Variable', 'tf.Variable', (['updates'], {}), '(updates)\n', (4395, 4404), True, 'import tensorflow as tf\n'), ((4417, 
4462), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['indices_const', 'x', 'params_shape'], {}), '(indices_const, x, params_shape)\n', (4430, 4462), True, 'import tensorflow as tf\n'), ((5849, 5866), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5864, 5866), True, 'import tensorflow as tf\n'), ((5886, 5905), 'tensorflow.Variable', 'tf.Variable', (['params'], {}), '(params)\n', (5897, 5905), True, 'import tensorflow as tf\n'), ((5919, 5967), 'tensorflow.tensor_scatter_nd_update', 'tf.tensor_scatter_nd_update', (['x', 'i_const', 'y_const'], {}), '(x, i_const, y_const)\n', (5946, 5967), True, 'import tensorflow as tf\n'), ((6009, 6026), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6024, 6026), True, 'import tensorflow as tf\n'), ((6046, 6066), 'tensorflow.Variable', 'tf.Variable', (['updates'], {}), '(updates)\n', (6057, 6066), True, 'import tensorflow as tf\n'), ((6080, 6128), 'tensorflow.tensor_scatter_nd_update', 'tf.tensor_scatter_nd_update', (['x_const', 'i_const', 'y'], {}), '(x_const, i_const, y)\n', (6107, 6128), True, 'import tensorflow as tf\n'), ((6612, 6640), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (6638, 6640), True, 'import oneflow as flow\n'), ((7705, 7738), 'oneflow.watch_diff', 'flow.watch_diff', (['x', 'compare_dz_dx'], {}), '(x, compare_dz_dx)\n', (7720, 7738), True, 'import oneflow as flow\n'), ((7747, 7780), 'oneflow.watch_diff', 'flow.watch_diff', (['y', 'compare_dz_dy'], {}), '(y, compare_dz_dy)\n', (7762, 7780), True, 'import oneflow as flow\n'), ((9707, 9755), 'oneflow.watch_diff', 'flow.watch_diff', (['params_var', 'params_grad_watcher'], {}), '(params_var, params_grad_watcher)\n', (9722, 9755), True, 'import oneflow as flow\n'), ((9764, 9814), 'oneflow.watch_diff', 'flow.watch_diff', (['updates_var', 'updates_grad_watcher'], {}), '(updates_var, updates_grad_watcher)\n', (9779, 9814), True, 'import oneflow as flow\n'), ((9931, 9994), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (9951, 9994), True, 'import oneflow as flow\n'), ((10402, 10425), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (10423, 10425), True, 'import oneflow as flow\n'), ((10689, 10752), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (10709, 10752), True, 'import oneflow as flow\n'), ((11148, 11171), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (11169, 11171), True, 'import oneflow as flow\n'), ((11650, 11667), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (11665, 11667), True, 'import tensorflow as tf\n'), ((11696, 11715), 'tensorflow.Variable', 'tf.Variable', (['params'], {}), '(params)\n', (11707, 11715), True, 'import tensorflow as tf\n'), ((11734, 11800), 'tensorflow.tensor_scatter_nd_add', 'tf.tensor_scatter_nd_add', (['params_var', 'indices_const', 'updates_const'], {}), '(params_var, indices_const, updates_const)\n', (11758, 11800), True, 'import tensorflow as tf\n'), ((11865, 11882), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (11880, 11882), True, 'import tensorflow as tf\n'), ((11912, 11932), 'tensorflow.Variable', 'tf.Variable', (['updates'], {}), '(updates)\n', (11923, 11932), True, 'import tensorflow as tf\n'), ((11951, 12017), 'tensorflow.tensor_scatter_nd_add', 
'tf.tensor_scatter_nd_add', (['params_const', 'indices_const', 'updates_var'], {}), '(params_const, indices_const, updates_var)\n', (11975, 12017), True, 'import tensorflow as tf\n'), ((13290, 13316), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (13314, 13316), True, 'import oneflow as flow\n'), ((14633, 14659), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (14657, 14659), True, 'import oneflow as flow\n'), ((16191, 16217), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (16215, 16217), True, 'import oneflow as flow\n'), ((17561, 17574), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17572, 17574), False, 'from collections import OrderedDict\n'), ((17819, 17839), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (17829, 17839), False, 'from test_util import GenArgList\n'), ((17961, 17974), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17972, 17974), False, 'from collections import OrderedDict\n'), ((18172, 18192), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18182, 18192), False, 'from test_util import GenArgList\n'), ((18314, 18327), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18325, 18327), False, 'from collections import OrderedDict\n'), ((18530, 18550), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18540, 18550), False, 'from test_util import GenArgList\n'), ((18672, 18685), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18683, 18685), False, 'from collections import OrderedDict\n'), ((18955, 18975), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18965, 18975), False, 'from test_util import GenArgList\n'), ((19097, 19110), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19108, 19110), False, 'from collections import OrderedDict\n'), ((19304, 19324), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (19314, 19324), False, 'from test_util import GenArgList\n'), ((19446, 19459), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19457, 19459), False, 'from collections import OrderedDict\n'), ((19750, 19770), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (19760, 19770), False, 'from test_util import GenArgList\n'), ((19906, 19919), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19917, 19919), False, 'from collections import OrderedDict\n'), ((20120, 20140), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (20130, 20140), False, 'from test_util import GenArgList\n'), ((20276, 20289), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20287, 20289), False, 'from collections import OrderedDict\n'), ((20506, 20526), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (20516, 20526), False, 'from test_util import GenArgList\n'), ((20662, 20675), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20673, 20675), False, 'from collections import OrderedDict\n'), ((20890, 20910), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (20900, 20910), False, 'from test_util import GenArgList\n'), ((21043, 21056), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21054, 21056), False, 'from collections import OrderedDict\n'), ((21301, 21321), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21311, 21321), False, 'from test_util import 
GenArgList\n'), ((21464, 21477), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21475, 21477), False, 'from collections import OrderedDict\n'), ((21732, 21752), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21742, 21752), False, 'from test_util import GenArgList\n'), ((21895, 21908), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21906, 21908), False, 'from collections import OrderedDict\n'), ((22181, 22201), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (22191, 22201), False, 'from test_util import GenArgList\n'), ((22423, 22436), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (22434, 22436), False, 'from collections import OrderedDict\n'), ((22734, 22754), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (22744, 22754), False, 'from test_util import GenArgList\n'), ((22293, 22327), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (22302, 22327), False, 'import os\n'), ((22979, 22992), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (22990, 22992), False, 'from collections import OrderedDict\n'), ((23254, 23274), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (23264, 23274), False, 'from test_util import GenArgList\n'), ((22851, 22885), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (22860, 22885), False, 'import os\n'), ((23515, 23528), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23526, 23528), False, 'from collections import OrderedDict\n'), ((23804, 23824), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (23814, 23824), False, 'from test_util import GenArgList\n'), ((23371, 23405), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (23380, 23405), False, 'import os\n'), ((24077, 24090), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24088, 24090), False, 'from collections import OrderedDict\n'), ((24364, 24384), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (24374, 24384), False, 'from test_util import GenArgList\n'), ((23935, 23969), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (23944, 23969), False, 'import os\n'), ((24636, 24649), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24647, 24649), False, 'from collections import OrderedDict\n'), ((24938, 24958), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (24948, 24958), False, 'from test_util import GenArgList\n'), ((24495, 24529), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (24504, 24529), False, 'import os\n'), ((25205, 25218), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25216, 25218), False, 'from collections import OrderedDict\n'), ((25486, 25506), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (25496, 25506), False, 'from test_util import GenArgList\n'), ((25066, 25100), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (25075, 25100), False, 'import os\n'), ((1027, 1056), 'numpy.random.rand', 'np.random.rand', (['*params_shape'], {}), '(*params_shape)\n', (1041, 1056), True, 'import numpy as np\n'), ((1090, 1120), 'numpy.random.rand', 'np.random.rand', (['*updates_shape'], {}), '(*updates_shape)\n', (1104, 1120), True, 
'import numpy as np\n'), ((1385, 1429), 'numpy.arange', 'np.arange', (['params_shape[col]'], {'dtype': 'np.int32'}), '(params_shape[col], dtype=np.int32)\n', (1394, 1429), True, 'import numpy as np\n'), ((1442, 1473), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_indices'], {}), '(rand_indices)\n', (1459, 1473), True, 'import numpy as np\n'), ((2557, 2583), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2581, 2583), True, 'import oneflow as flow\n'), ((2636, 2664), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2662, 2664), True, 'import oneflow as flow\n'), ((2731, 2771), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (2751, 2771), True, 'import oneflow as flow\n'), ((2996, 3032), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['x'], {}), '(x)\n', (3029, 3032), True, 'import oneflow as flow\n'), ((3082, 3121), 'oneflow.scatter_nd', 'flow.scatter_nd', (['indices_blob', 'x', 'shape'], {}), '(indices_blob, x, shape)\n', (3097, 3121), True, 'import oneflow as flow\n'), ((6762, 6815), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (6783, 6815), True, 'import oneflow.typing as oft\n'), ((6838, 6892), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (6859, 6892), True, 'import oneflow.typing as oft\n'), ((6909, 6963), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['updates.shape'], {'dtype': 'flow.float'}), '(updates.shape, dtype=flow.float)\n', (6930, 6963), True, 'import oneflow.typing as oft\n'), ((6985, 7025), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (7005, 7025), True, 'import oneflow as flow\n'), ((7507, 7555), 'oneflow.tensor_scatter_nd_update', 'flow.tensor_scatter_nd_update', (['x', 'indices_def', 'y'], {}), '(x, indices_def, y)\n', (7536, 7555), True, 'import oneflow as flow\n'), ((8594, 8634), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (8614, 8634), True, 'import oneflow as flow\n'), ((9102, 9147), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['params_var'], {}), '(params_var)\n', (9135, 9147), True, 'import oneflow as flow\n'), ((9174, 9220), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['params_blob'], {}), '(params_blob)\n', (9207, 9220), True, 'import oneflow as flow\n'), ((9248, 9295), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['updates_blob'], {}), '(updates_blob)\n', (9281, 9295), True, 'import oneflow as flow\n'), ((9322, 9368), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['updates_var'], {}), '(updates_var)\n', (9355, 9368), True, 'import oneflow as flow\n'), ((9490, 9555), 'oneflow.tensor_scatter_nd_add', 'flow.tensor_scatter_nd_add', (['params_var', 'indices_blob', 'updates_var'], {}), '(params_var, indices_blob, updates_var)\n', (9516, 9555), True, 'import oneflow as flow\n'), ((9893, 9919), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (9917, 9919), True, 'import oneflow as flow\n'), ((10649, 10677), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (10675, 10677), 
True, 'import oneflow as flow\n'), ((12396, 12445), 'numpy.allclose', 'np.allclose', (['tf_params_grad_np', 'of_params_grad_np'], {}), '(tf_params_grad_np, of_params_grad_np)\n', (12407, 12445), True, 'import numpy as np\n'), ((12703, 12754), 'numpy.allclose', 'np.allclose', (['tf_updates_grad_np', 'of_updates_grad_np'], {}), '(tf_updates_grad_np, of_updates_grad_np)\n', (12714, 12754), True, 'import numpy as np\n'), ((13418, 13483), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices_static_shape'], {'dtype': 'flow.int32'}), '(indices_static_shape, dtype=flow.int32)\n', (13443, 13483), True, 'import oneflow.typing as oft\n'), ((13506, 13571), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['updates_static_shape'], {'dtype': 'flow.float'}), '(updates_static_shape, dtype=flow.float)\n', (13531, 13571), True, 'import oneflow.typing as oft\n'), ((13593, 13627), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (13613, 13627), True, 'import oneflow as flow\n'), ((13648, 13703), 'oneflow.scatter_nd', 'flow.scatter_nd', (['indices_def', 'updates_def', 'params_shape'], {}), '(indices_def, updates_def, params_shape)\n', (13663, 13703), True, 'import oneflow as flow\n'), ((14774, 14831), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (14799, 14831), True, 'import oneflow.typing as oft\n'), ((14854, 14919), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices_static_shape'], {'dtype': 'flow.int32'}), '(indices_static_shape, dtype=flow.int32)\n', (14879, 14919), True, 'import oneflow.typing as oft\n'), ((14942, 15007), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['updates_static_shape'], {'dtype': 'flow.float'}), '(updates_static_shape, dtype=flow.float)\n', (14967, 15007), True, 'import oneflow.typing as oft\n'), ((15029, 15063), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (15049, 15063), True, 'import oneflow as flow\n'), ((15084, 15151), 'oneflow.tensor_scatter_nd_update', 'flow.tensor_scatter_nd_update', (['params_def', 'indices_def', 'updates_def'], {}), '(params_def, indices_def, updates_def)\n', (15113, 15151), True, 'import oneflow as flow\n'), ((16329, 16386), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (16354, 16386), True, 'import oneflow.typing as oft\n'), ((16409, 16474), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices_static_shape'], {'dtype': 'flow.int32'}), '(indices_static_shape, dtype=flow.int32)\n', (16434, 16474), True, 'import oneflow.typing as oft\n'), ((16497, 16562), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['updates_static_shape'], {'dtype': 'flow.float'}), '(updates_static_shape, dtype=flow.float)\n', (16522, 16562), True, 'import oneflow.typing as oft\n'), ((16584, 16618), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (16604, 16618), True, 'import oneflow as flow\n'), ((16639, 16703), 'oneflow.tensor_scatter_nd_add', 'flow.tensor_scatter_nd_add', (['params_def', 'indices_def', 'updates_def'], {}), '(params_def, indices_def, updates_def)\n', (16665, 16703), True, 'import oneflow as flow\n'), ((3462, 3520), 
'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (3487, 3520), True, 'import oneflow.typing as oft\n'), ((3547, 3605), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['updates.shape'], {'dtype': 'flow.float'}), '(updates.shape, dtype=flow.float)\n', (3572, 3605), True, 'import oneflow.typing as oft\n'), ((3814, 3868), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (3835, 3868), True, 'import oneflow.typing as oft\n'), ((3895, 3949), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['updates.shape'], {'dtype': 'flow.float'}), '(updates.shape, dtype=flow.float)\n', (3916, 3949), True, 'import oneflow.typing as oft\n'), ((10057, 10114), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (10082, 10114), True, 'import oneflow.typing as oft\n'), ((10141, 10199), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (10166, 10199), True, 'import oneflow.typing as oft\n'), ((10226, 10284), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['updates.shape'], {'dtype': 'flow.float'}), '(updates.shape, dtype=flow.float)\n', (10251, 10284), True, 'import oneflow.typing as oft\n'), ((10815, 10868), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (10836, 10868), True, 'import oneflow.typing as oft\n'), ((10895, 10949), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (10916, 10949), True, 'import oneflow.typing as oft\n'), ((10976, 11030), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['updates.shape'], {'dtype': 'flow.float'}), '(updates.shape, dtype=flow.float)\n', (10997, 11030), True, 'import oneflow.typing as oft\n'), ((1596, 1686), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'params_shape[col]', 'size': '(indices_rows,)', 'dtype': 'np.int32'}), '(low=0, high=params_shape[col], size=(indices_rows,),\n dtype=np.int32)\n', (1613, 1686), True, 'import numpy as np\n'), ((2936, 2964), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2961, 2964), True, 'import oneflow as flow\n'), ((7188, 7216), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (7213, 7216), True, 'import oneflow as flow\n'), ((7395, 7423), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (7420, 7423), True, 'import oneflow as flow\n'), ((8811, 8839), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (8836, 8839), True, 'import oneflow as flow\n'), ((9033, 9061), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (9058, 9061), True, 'import oneflow as flow\n'), ((3170, 3224), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (3211, 3224), True, 'import oneflow as flow\n'), ((7604, 7658), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', 
'[0.001]'], {}), '([], [0.001])\n', (7645, 7658), True, 'import oneflow as flow\n'), ((9604, 9658), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (9645, 9658), True, 'import oneflow as flow\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import OrderedDict import numpy as np import oneflow as flow import tensorflow as tf from test_util import GenArgList import oneflow.typing as oft gpus = tf.config.experimental.list_physical_devices("GPU") for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) def _random_inputs(params_shape, indices_shape): params = np.random.rand(*params_shape).astype(np.float32) indices = [] indices_rows = np.prod(indices_shape[:-1]) indices_cols = indices_shape[-1] for col in range(indices_cols): indices_col = np.random.randint( low=0, high=params_shape[col], size=(indices_rows,), dtype=np.int32 ).reshape(indices_shape[:-1]) indices.append(indices_col) indices = np.stack(indices, axis=len(indices_shape) - 1) return params, indices def _make_gather_nd_fn(params, indices, device_type, mirrored, compare_fn): flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) if mirrored: func_config.default_logical_view(flow.scope.mirrored_view()) else: func_config.default_logical_view(flow.scope.consistent_view()) def do_gather_nd(x_blob, i_blob): with flow.scope.placement(device_type, "0:0"): x = flow.get_variable( "params", shape=params.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), ) x = flow.cast_to_current_logical_view(x) x = x + x_blob y = flow.gather_nd(x, i_blob) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(y) flow.watch_diff(x, compare_fn) return y if mirrored: @flow.global_function(type="train", function_config=func_config) def gather_nd_fn( params_def: oft.ListNumpy.Placeholder(params.shape, dtype=flow.float), indices_def: oft.ListNumpy.Placeholder(indices.shape, dtype=flow.int32), ): return do_gather_nd(params_def, indices_def) else: @flow.global_function(type="train", function_config=func_config) def gather_nd_fn( params_def: oft.Numpy.Placeholder(params.shape, dtype=flow.float), indices_def: oft.Numpy.Placeholder(indices.shape, dtype=flow.int32), ): return do_gather_nd(params_def, indices_def) return gather_nd_fn def _of_dynamic_params_gather_nd(params, indices, static_params_shape, compare_fn): flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) func_config.default_logical_view(flow.scope.mirrored_view()) @flow.global_function(type="train", function_config=func_config) def gather_nd_fn( params_def: oft.ListNumpy.Placeholder(static_params_shape, dtype=flow.float), indices_def: oft.ListNumpy.Placeholder(indices.shape, dtype=flow.int32), ): with flow.scope.placement("gpu", "0:0"): one_var = flow.get_variable( "one", shape=(1,), dtype=flow.float32, initializer=flow.constant_initializer(1), ) one_var = flow.cast_to_current_logical_view(one_var) params_var = params_def * one_var y = flow.gather_nd(params_var, indices_def) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(y) 
        flow.watch_diff(params_var, compare_fn)
        return y

    check_point = flow.train.CheckPoint()
    check_point.init()
    return gather_nd_fn([params], [indices]).get().numpy_list()[0]


def _compare_gather_nd_with_tf(
    test_case, device_type, params_shape, indices_shape, mirrored=False
):
    params, indices = _random_inputs(params_shape, indices_shape)

    i = tf.constant(indices)
    with tf.GradientTape() as t:
        x = tf.Variable(params)
        y = tf.gather_nd(x, i)

    dy = t.gradient(y, x)
    if isinstance(dy, tf.IndexedSlices):
        test_case.assertTrue(
            np.array_equal(indices.ravel(), dy.indices.numpy().ravel())
        )
        zero_params = tf.Variable(np.full(params.shape, 0.0, dtype=np.float32))
        dy = tf.tensor_scatter_nd_add(zero_params, i, dy.values)

    if mirrored:

        def compare_dy(params_grad):
            test_case.assertTrue(
                np.array_equal(dy.numpy(), params_grad.numpy_list()[0])
            )

    else:

        def compare_dy(params_grad):
            test_case.assertTrue(np.array_equal(dy.numpy(), params_grad.numpy()))

    gather_nd_fn = _make_gather_nd_fn(
        params, indices, device_type, mirrored, compare_dy
    )
    check_point = flow.train.CheckPoint()
    check_point.init()

    if mirrored:
        of_y = gather_nd_fn([params], [indices]).get().numpy_list()[0]
    else:
        of_y = gather_nd_fn(params, indices).get().numpy()

    test_case.assertTrue(np.array_equal(y.numpy(), of_y))


def _compare_dynamic_gather_nd_with_tf(
    test_case, params_shape, static_params_shape, indices_shape
):
    params, indices = _random_inputs(params_shape, indices_shape)

    i = tf.constant(indices)
    with tf.GradientTape() as t:
        x = tf.Variable(params)
        y = tf.gather_nd(x, i)

    dy = t.gradient(y, x)
    if isinstance(dy, tf.IndexedSlices):
        test_case.assertTrue(
            np.array_equal(indices.ravel(), dy.indices.numpy().ravel())
        )
        zero_params = tf.constant(np.full(params.shape, 0.0, dtype=np.float32))
        dy = tf.tensor_scatter_nd_add(zero_params, i, dy.values)

    def compare_dy(params_grad):
        test_case.assertTrue(np.array_equal(dy.numpy(), params_grad.numpy_list()[0]))

    of_y = _of_dynamic_params_gather_nd(
        params, indices, static_params_shape, compare_dy
    )
    test_case.assertTrue(np.array_equal(y.numpy(), of_y))


def _of_gather_nd_dynamic_indices(params, indices, indices_static_shape, device_type):
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def gather_nd_fn(
        params_def: oft.ListNumpy.Placeholder(params.shape, dtype=flow.float),
        indices_def: oft.ListNumpy.Placeholder(indices_static_shape, dtype=flow.int32),
    ):
        with flow.scope.placement(device_type, "0:0"):
            return flow.gather_nd(params_def, indices_def)

    return gather_nd_fn([params], [indices]).get().numpy_list()[0]


def _compare_gather_nd_dynamic_indices_with_tf(
    test_case, params_shape, indices_shape, indices_static_shape, device_type
):
    params, indices = _random_inputs(params_shape, indices_shape)

    i = tf.constant(indices)
    x = tf.Variable(params)
    y = tf.gather_nd(x, i)

    of_y = _of_gather_nd_dynamic_indices(
        params, indices, indices_static_shape, device_type
    )
    test_case.assertTrue(np.array_equal(y.numpy(), of_y))


def test_gather_nd(test_case):
    arg_dict = OrderedDict()
    arg_dict["device_type"] = ["gpu", "cpu"]
    arg_dict["params_shape"] = [(10,)]
    arg_dict["indices_shape"] = [(5, 1)]
    for arg in GenArgList(arg_dict):
        _compare_gather_nd_with_tf(test_case, *arg)


def test_gather_nd_case_1(test_case):
    arg_dict = OrderedDict()
    arg_dict["device_type"] = ["gpu"]
    arg_dict["params_shape"] = [(20, 10, 10, 3, 3)]
    arg_dict["indices_shape"] = [(2, 3, 3)]
    for arg in GenArgList(arg_dict):
        _compare_gather_nd_with_tf(test_case, *arg)


def test_gather_nd_case_2(test_case):
    arg_dict = OrderedDict()
    arg_dict["device_type"] = ["cpu", "gpu"]
    arg_dict["params_shape"] = [(10, 8, 4)]
    arg_dict["indices_shape"] = [(2, 2)]
    arg_dict["mirrored"] = [True]
    for arg in GenArgList(arg_dict):
        _compare_gather_nd_with_tf(test_case, *arg)


def test_gather_nd_case_3(test_case):
    arg_dict = OrderedDict()
    arg_dict["device_type"] = ["gpu"]
    arg_dict["params_shape"] = [(32, 60, 80, 25)]
    arg_dict["indices_shape"] = [(128, 2)]
    for arg in GenArgList(arg_dict):
        _compare_gather_nd_with_tf(test_case, *arg)


def test_gather_nd_case_4(test_case):
    arg_dict = OrderedDict()
    arg_dict["device_type"] = ["gpu"]
    arg_dict["params_shape"] = [(128, 64, 2, 16, 7)]
    arg_dict["indices_shape"] = [(30, 10, 3)]
    arg_dict["mirrored"] = [True]
    for arg in GenArgList(arg_dict):
        _compare_gather_nd_with_tf(test_case, *arg)


def test_dynamic_gather_nd(test_case):
    arg_dict = OrderedDict()
    arg_dict["params_shape"] = [(30, 15)]
    arg_dict["static_params_shape"] = [(32, 16)]
    arg_dict["indices_shape"] = [(12, 1)]
    for arg in GenArgList(arg_dict):
        _compare_dynamic_gather_nd_with_tf(test_case, *arg)


def test_gather_nd_dynamic_indices(test_case):
    arg_dict = OrderedDict()
    arg_dict["params_shape"] = [(25, 10)]
    arg_dict["indices_shape"] = [(11, 1)]
    arg_dict["indices_static_shape"] = [(15, 1)]
    arg_dict["device_type"] = ["gpu"]
    for arg in GenArgList(arg_dict):
        _compare_gather_nd_dynamic_indices_with_tf(test_case, *arg)


def test_gather_nd_empty_indices(test_case):
    arg_dict = OrderedDict()
    arg_dict["params_shape"] = [(12, 13, 7)]
    arg_dict["indices_shape"] = [(5, 0, 2)]
    arg_dict["indices_static_shape"] = [(5, 10, 2)]
    arg_dict["device_type"] = ["gpu"]
    for arg in GenArgList(arg_dict):
        _compare_gather_nd_dynamic_indices_with_tf(test_case, *arg)
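
# An illustrative NumPy sketch (not part of the original test file) of the
# gather_nd semantics verified above; `gather_nd_reference` is a hypothetical
# helper mirroring tf.gather_nd, the ground truth these comparisons use. Each
# row of `indices` selects the (possibly partial) element params[tuple(row)],
# and the result keeps the leading index-batch shape.
import numpy as np


def gather_nd_reference(params, indices):
    out_shape = indices.shape[:-1] + params.shape[indices.shape[-1] :]
    rows = [params[tuple(idx)] for idx in indices.reshape(-1, indices.shape[-1])]
    return np.array(rows, dtype=params.dtype).reshape(out_shape)


# Example: gathering rows 2 and 0 of a (3, 2) matrix.
# gather_nd_reference(np.arange(6, dtype=np.float32).reshape(3, 2),
#                     np.array([[2], [0]], dtype=np.int32))
# -> [[4., 5.], [0., 1.]]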
[ "oneflow.FunctionConfig", "oneflow.gather_nd", "oneflow.scope.consistent_view", "oneflow.optimizer.PiecewiseConstantScheduler", "oneflow.global_function", "oneflow.typing.Numpy.Placeholder", "oneflow.scope.mirrored_view", "oneflow.clear_default_session", "oneflow.cast_to_current_logical_view", "oneflow.typing.ListNumpy.Placeholder", "oneflow.constant_initializer", "oneflow.train.CheckPoint", "oneflow.scope.placement", "oneflow.watch_diff" ]
[((763, 814), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (807, 814), True, 'import tensorflow as tf\n'), ((836, 887), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (876, 887), True, 'import tensorflow as tf\n'), ((1037, 1064), 'numpy.prod', 'np.prod', (['indices_shape[:-1]'], {}), '(indices_shape[:-1])\n', (1044, 1064), True, 'import numpy as np\n'), ((1503, 1531), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1529, 1531), True, 'import oneflow as flow\n'), ((1550, 1571), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1569, 1571), True, 'import oneflow as flow\n'), ((3210, 3238), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (3236, 3238), True, 'import oneflow as flow\n'), ((3257, 3278), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3276, 3278), True, 'import oneflow as flow\n'), ((3396, 3459), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (3416, 3459), True, 'import oneflow as flow\n'), ((4297, 4320), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (4318, 4320), True, 'import oneflow as flow\n'), ((4595, 4615), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (4606, 4615), True, 'import tensorflow as tf\n'), ((5468, 5491), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (5489, 5491), True, 'import oneflow as flow\n'), ((5916, 5936), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (5927, 5936), True, 'import tensorflow as tf\n'), ((6734, 6762), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (6760, 6762), True, 'import oneflow as flow\n'), ((6781, 6802), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (6800, 6802), True, 'import oneflow as flow\n'), ((6920, 6969), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (6940, 6969), True, 'import oneflow as flow\n'), ((7554, 7574), 'tensorflow.constant', 'tf.constant', (['indices'], {}), '(indices)\n', (7565, 7574), True, 'import tensorflow as tf\n'), ((7583, 7602), 'tensorflow.Variable', 'tf.Variable', (['params'], {}), '(params)\n', (7594, 7602), True, 'import tensorflow as tf\n'), ((7611, 7629), 'tensorflow.gather_nd', 'tf.gather_nd', (['x', 'i'], {}), '(x, i)\n', (7623, 7629), True, 'import tensorflow as tf\n'), ((7844, 7857), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7855, 7857), False, 'from collections import OrderedDict\n'), ((7998, 8018), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8008, 8018), False, 'from test_util import GenArgList\n'), ((8127, 8140), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8138, 8140), False, 'from collections import OrderedDict\n'), ((8290, 8310), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8300, 8310), False, 'from test_util import GenArgList\n'), ((8419, 8432), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8430, 8432), False, 'from collections import OrderedDict\n'), ((8612, 8632), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8622, 8632), 
False, 'from test_util import GenArgList\n'), ((8741, 8754), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8752, 8754), False, 'from collections import OrderedDict\n'), ((8901, 8921), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8911, 8921), False, 'from test_util import GenArgList\n'), ((9030, 9043), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9041, 9043), False, 'from collections import OrderedDict\n'), ((9230, 9250), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (9240, 9250), False, 'from test_util import GenArgList\n'), ((9360, 9373), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9371, 9373), False, 'from collections import OrderedDict\n'), ((9522, 9542), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (9532, 9542), False, 'from test_util import GenArgList\n'), ((9668, 9681), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9679, 9681), False, 'from collections import OrderedDict\n'), ((9868, 9888), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (9878, 9888), False, 'from test_util import GenArgList\n'), ((10020, 10033), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10031, 10033), False, 'from collections import OrderedDict\n'), ((10228, 10248), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (10238, 10248), False, 'from test_util import GenArgList\n'), ((2354, 2384), 'oneflow.watch_diff', 'flow.watch_diff', (['x', 'compare_fn'], {}), '(x, compare_fn)\n', (2369, 2384), True, 'import oneflow as flow\n'), ((2430, 2493), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2450, 2493), True, 'import oneflow as flow\n'), ((2777, 2840), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2797, 2840), True, 'import oneflow as flow\n'), ((3362, 3388), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (3386, 3388), True, 'import oneflow as flow\n'), ((4221, 4260), 'oneflow.watch_diff', 'flow.watch_diff', (['params_var', 'compare_fn'], {}), '(params_var, compare_fn)\n', (4236, 4260), True, 'import oneflow as flow\n'), ((4625, 4642), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4640, 4642), True, 'import tensorflow as tf\n'), ((4661, 4680), 'tensorflow.Variable', 'tf.Variable', (['params'], {}), '(params)\n', (4672, 4680), True, 'import tensorflow as tf\n'), ((4693, 4711), 'tensorflow.gather_nd', 'tf.gather_nd', (['x', 'i'], {}), '(x, i)\n', (4705, 4711), True, 'import tensorflow as tf\n'), ((4985, 5036), 'tensorflow.tensor_scatter_nd_add', 'tf.tensor_scatter_nd_add', (['zero_params', 'i', 'dy.values'], {}), '(zero_params, i, dy.values)\n', (5009, 5036), True, 'import tensorflow as tf\n'), ((5946, 5963), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5961, 5963), True, 'import tensorflow as tf\n'), ((5982, 6001), 'tensorflow.Variable', 'tf.Variable', (['params'], {}), '(params)\n', (5993, 6001), True, 'import tensorflow as tf\n'), ((6014, 6032), 'tensorflow.gather_nd', 'tf.gather_nd', (['x', 'i'], {}), '(x, i)\n', (6026, 6032), True, 'import tensorflow as tf\n'), ((6306, 6357), 'tensorflow.tensor_scatter_nd_add', 'tf.tensor_scatter_nd_add', (['zero_params', 'i', 'dy.values'], {}), '(zero_params, i, 
dy.values)\n', (6330, 6357), True, 'import tensorflow as tf\n'), ((6886, 6912), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (6910, 6912), True, 'import oneflow as flow\n'), ((952, 981), 'numpy.random.rand', 'np.random.rand', (['*params_shape'], {}), '(*params_shape)\n', (966, 981), True, 'import numpy as np\n'), ((1676, 1702), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1700, 1702), True, 'import oneflow as flow\n'), ((1755, 1783), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1781, 1783), True, 'import oneflow as flow\n'), ((1837, 1877), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1857, 1877), True, 'import oneflow as flow\n'), ((2100, 2136), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['x'], {}), '(x)\n', (2133, 2136), True, 'import oneflow as flow\n'), ((2180, 2205), 'oneflow.gather_nd', 'flow.gather_nd', (['x', 'i_blob'], {}), '(x, i_blob)\n', (2194, 2205), True, 'import oneflow as flow\n'), ((3502, 3566), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['static_params_shape'], {'dtype': 'flow.float'}), '(static_params_shape, dtype=flow.float)\n', (3527, 3566), True, 'import oneflow.typing as oft\n'), ((3589, 3647), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (3614, 3647), True, 'import oneflow.typing as oft\n'), ((3669, 3703), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (3689, 3703), True, 'import oneflow as flow\n'), ((3927, 3969), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['one_var'], {}), '(one_var)\n', (3960, 3969), True, 'import oneflow as flow\n'), ((4032, 4071), 'oneflow.gather_nd', 'flow.gather_nd', (['params_var', 'indices_def'], {}), '(params_var, indices_def)\n', (4046, 4071), True, 'import oneflow as flow\n'), ((4926, 4970), 'numpy.full', 'np.full', (['params.shape', '(0.0)'], {'dtype': 'np.float32'}), '(params.shape, 0.0, dtype=np.float32)\n', (4933, 4970), True, 'import numpy as np\n'), ((6247, 6291), 'numpy.full', 'np.full', (['params.shape', '(0.0)'], {'dtype': 'np.float32'}), '(params.shape, 0.0, dtype=np.float32)\n', (6254, 6291), True, 'import numpy as np\n'), ((7012, 7069), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (7037, 7069), True, 'import oneflow.typing as oft\n'), ((7092, 7157), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices_static_shape'], {'dtype': 'flow.int32'}), '(indices_static_shape, dtype=flow.int32)\n', (7117, 7157), True, 'import oneflow.typing as oft\n'), ((7179, 7219), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (7199, 7219), True, 'import oneflow as flow\n'), ((7240, 7279), 'oneflow.gather_nd', 'flow.gather_nd', (['params_def', 'indices_def'], {}), '(params_def, indices_def)\n', (7254, 7279), True, 'import oneflow as flow\n'), ((1160, 1250), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'params_shape[col]', 'size': '(indices_rows,)', 'dtype': 'np.int32'}), '(low=0, high=params_shape[col], size=(indices_rows,),\n dtype=np.int32)\n', (1177, 1250), True, 'import numpy as np\n'), ((2544, 
2601), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (2569, 2601), True, 'import oneflow.typing as oft\n'), ((2628, 2686), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (2653, 2686), True, 'import oneflow.typing as oft\n'), ((2891, 2944), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['params.shape'], {'dtype': 'flow.float'}), '(params.shape, dtype=flow.float)\n', (2912, 2944), True, 'import oneflow.typing as oft\n'), ((2971, 3025), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (2992, 3025), True, 'import oneflow.typing as oft\n'), ((2040, 2068), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2065, 2068), True, 'import oneflow as flow\n'), ((3861, 3889), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(1)'], {}), '(1)\n', (3886, 3889), True, 'import oneflow as flow\n'), ((2254, 2308), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (2295, 2308), True, 'import oneflow as flow\n'), ((4120, 4174), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (4161, 4174), True, 'import oneflow as flow\n')]
import math

import oneflow as flow

from oneflow_gpt import distribute as dist
from oneflow_gpt.config import get_args
from oneflow_gpt.logger import print_rank_0


class GPTModel(flow.nn.Module):
    def __init__(self):
        super().__init__()
        args = get_args()
        self.batch_size = args.global_batch_size // args.num_accumulation_steps
        self.seq_length = args.seq_length
        self.hidden_size = args.hidden_size

        self.embedding = Embedding(
            self.seq_length, self.hidden_size, args.padded_vocab_size
        )
        self.transformer = Transformer(self.hidden_size)
        self.logits = Logits()

    def forward(self, tokens):
        # tokens shape: (batch_size, seq_length)
        # sbp: [S(0), B]
        assert tokens.ndim == 2
        assert tokens.shape[0] == self.batch_size
        assert tokens.shape[1] == self.seq_length

        hidden_states = self.embedding(tokens)
        h = self.transformer(hidden_states)

        assert h.shape[0] == self.batch_size
        assert h.shape[1] == self.seq_length
        assert h.shape[2] == self.hidden_size

        return self.logits(h, self.embedding.wte)


class Logits(flow.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, hidden_states, word_embeddings):
        assert hidden_states.ndim == 3

        w = word_embeddings.to_consistent(placement=hidden_states.placement)
        # h.grad.sbp: [S(0), P] -> [S(0), B]
        h = hidden_states.to_consistent(grad_sbp=hidden_states.sbp)

        # shape sign: (B * S, H) x (H, V) -> (B * S, V)
        # matmul fwd sbp sign: [S(0), B] x [B, S(1)] (wte.T) -> [S(0), S(1)]
        # bwd h.grad sbp sign: [S(0), S(1)] (lgs.grad) x [B, S(0)] (wte) -> [S(0), P] (h.grad)
        lgs = flow._C.matmul(h, w, transpose_b=True)
        return lgs


class Embedding(flow.nn.Module):
    def __init__(self, seq_length, hidden_size, vocab_size):
        super().__init__()
        self.seq_length = seq_length
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size

        args = get_args()
        self.dropout = flow.nn.Dropout(p=args.hidden_dropout)
        self.enable_amp = args.fp16

        # word token embedding shape (vocab_size, hidden_size)
        # sbp: [B, S(0)]
        self.wte = flow.nn.Parameter(
            flow.empty(
                (self.vocab_size, self.hidden_size),
                dtype=flow.float32,
                placement=dist.get_layer_placement(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
            )
        )
        # word position embedding shape (seq_len, hidden_size)
        # sbp: [B, B]
        self.wpe = flow.nn.Parameter(
            flow.empty(
                (self.seq_length, self.hidden_size),
                dtype=flow.float32,
                placement=dist.get_layer_placement(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )

        flow.nn.init.normal_(self.wte, std=args.init_method_std)
        flow.nn.init.normal_(self.wpe, std=args.init_method_std)

    def forward(self, tokens):
        # tokens shape: (batch_size, seq_len)
        # sbp: [S(0), B]
        assert tokens.ndim == 2
        assert tokens.shape[-1] == self.seq_length

        if self.enable_amp:
            wte = flow._C.amp_white_identity(self.wte)
            wpe = flow._C.amp_white_identity(self.wpe)
        else:
            wte = self.wte
            wpe = self.wpe

        # wte.grad: [P, S(0)] -> [B, S(0)]
        # wte = wte.to_consistent(grad_sbp=self.wte.sbp)
        # gather forward sbp sign: [B, S(0)] x [S(0), B] -> [S(0), P]
        # backward sbp sign:
        # [S(0), B] (h.grad) x [S(0), B] (tokens) x [B, S(0)] (wte) -> [P, S(0)] (wte.grad)
        h = flow._C.gather(wte, tokens, axis=0)
        # hidden_states shape: (batch_size, seq_len, hidden_size)
        # hidden_states: [S(0), P] -> [S(0), B]
        h = h.to_consistent(sbp=dist.get_hidden_sbp())

        # (h + self.wpe) will apply broadcast_add,
        # shape sign: (batch_size, seq_len, hidden_size) + (seq_len, hidden_size)
        #             -> (batch_size, seq_len, hidden_size)
        # sbp sign: [S(0), B] + [B, B] -> [S(0), B]
        return self.dropout(h + wpe)


def init_method_normal(sigma):
    """Init method based on N(0, sigma)."""

    def init_(tensor):
        return flow.nn.init.normal_(tensor, mean=0.0, std=sigma)

    return init_


def scaled_init_method_normal(sigma, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers))"""
    std = sigma / math.sqrt(2.0 * num_layers)

    def init_(tensor):
        return flow.nn.init.normal_(tensor, mean=0.0, std=std)

    return init_


class Transformer(flow.nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.hidden_size = hidden_size

        args = get_args()
        self.is_seq_len_dim_leading = True if args.multihead_attention_fusion else False
        self.num_layers = args.num_layers
        self._build_layers(args.init_method_std)
        self.layernorm_f = LayerNorm(-1, (self.hidden_size,))

    def _build_layers(self, init_method_std):
        for i in range(self.num_layers):
            setattr(
                self,
                f"layer_{i}",
                TransformerLayer(
                    i,
                    self.hidden_size,
                    self.is_seq_len_dim_leading,
                    init_method=init_method_normal(init_method_std),
                    output_layer_init_method=scaled_init_method_normal(
                        init_method_std, self.num_layers
                    ),
                ),
            )
            setattr(self, f"layer_checkpoint_{i}", ActivationCheckpointing(i))

    def _get_layer(self, layer_idx):
        layer = getattr(self, f"layer_{layer_idx}")
        checkpoint = getattr(self, f"layer_checkpoint_{layer_idx}")
        return layer, checkpoint

    def forward(self, hidden_states):
        # hidden_states shape: (batch_size, seq_length, hidden_size)
        # sbp: [S(0), B]
        assert hidden_states.ndim == 3
        assert hidden_states.shape[-1] == self.hidden_size

        if self.is_seq_len_dim_leading:
            h = hidden_states.transpose(0, 1)
        else:
            h = hidden_states

        for i in range(self.num_layers):
            layer, checkpoint = self._get_layer(i)
            h = layer(checkpoint(h))

        h = self.layernorm_f(h)

        assert h.ndim == 3
        if self.is_seq_len_dim_leading:
            h = h.transpose(0, 1)

        return h


class ActivationCheckpointing(flow.nn.Module):
    def __init__(self, layer_idx):
        super().__init__()
        self.layer_idx = layer_idx

    def forward(self, x):
        x = x.to_consistent(placement=dist.get_layer_placement(self.layer_idx))
        return flow._C.identity(x)


class TransformerLayer(flow.nn.Module):
    def __init__(
        self,
        layer_idx,
        hidden_size,
        is_seq_len_dim_leading,
        init_method,
        output_layer_init_method,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.layer_idx = layer_idx

        args = get_args()

        self.attn = SelfAttention(
            layer_idx,
            hidden_size,
            is_seq_len_dim_leading,
            args.hidden_dropout,
            init_method,
            output_layer_init_method,
        )
        self.mlp = MLP(
            layer_idx,
            hidden_size,
            args.hidden_dropout,
            init_method,
            output_layer_init_method,
        )

        self.layernorm_1 = LayerNorm(layer_idx, (self.hidden_size,))
        self.layernorm_2 = LayerNorm(layer_idx, (self.hidden_size,))

    def forward(self, hidden_states):
        # hidden_states shape: (batch_size, seq_length, hidden_size)
        # sbp: [S(0), B]
        assert hidden_states.ndim == 3
        assert hidden_states.shape[-1] == self.hidden_size

        h = hidden_states
        norm1 = self.layernorm_1(h)
        h = h + self.attn(norm1)

        norm2 = self.layernorm_2(h)
        h = h + self.mlp(norm2)

        return h


class SelfAttention(flow.nn.Module):
    def __init__(
        self,
        layer_idx,
        hidden_size,
        is_seq_len_dim_leading,
        hidden_dropout_rate,
        init_method,
        output_layer_init_method,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.is_seq_len_dim_leading = is_seq_len_dim_leading

        args = get_args()
        self.num_heads = args.num_attention_heads
        self.head_size = args.hidden_size // args.num_attention_heads
        self.attention_dropout_rate = args.attention_dropout
        self.scale_tril_softmax_dropout_fusion = args.scale_tril_softmax_dropout_fusion
        self.multihead_attention_fusion = args.multihead_attention_fusion

        if not self.scale_tril_softmax_dropout_fusion:
            self.multihead_attn_dropout = 
flow.nn.Dropout(p=self.attention_dropout_rate) self.norm_factor = math.sqrt(float(self.head_size)) self.coeff = 1.0 if args.apply_query_key_layer_scaling: self.coeff = float(layer_idx + 1) self.norm_factor *= self.coeff self.c_attn = ColumnParallelLinear( layer_idx, self.hidden_size, self.hidden_size * 3, init_method, ) self.c_proj = RowParallelLinear( layer_idx, self.hidden_size, self.hidden_size, output_layer_init_method, dropout_rate=hidden_dropout_rate, ) def query_key_value(self, h): """ Split input to q, k, v and split hidden states into heads, shape: (batch_size, seq_length, hidden_size) -> (batch_size, seq_length, num_attn_heads, head_size) -> (batch_size, num_attn_heads, seq_length, head_size) """ # Note: 3 is between num_heads and head_size # that ensure the features of heads of q, k, v is contiguously arranged new_shape = ( h.shape[0], h.shape[1], self.num_heads, 3 * self.head_size, ) if self.is_seq_len_dim_leading: # (seq_len, batch_size, num_heads, head_size) -> (batch_size, num_heads, seq_len, head_size) perm = [1, 2, 0, 3] else: # (batch_size, seq_len, num_heads, head_size) -> (batch_size, num_heads, seq_len, head_size) perm = [0, 2, 1, 3] h = h.view(*new_shape) q, k, v = ( flow._C.transpose( h[:, :, :, (i * self.head_size) : ((i + 1) * self.head_size)], perm=perm, ) for i in range(3) ) return q, k, v def multihead_attn(self, q, k, v): # q, k, v shape: (batch_size, num_heads, seq_length, head_size) # q * k: batch_matmul # shape sign: (b, n, s, h) x (b, n, h, s) (k.T) -> (b, n, s, s) # sbp sign: [S(0), S(1)] x [S(0), S(1)] -> [S(0), S(1)] qmk = flow._C.matmul(q, k, transpose_b=True, alpha=(1.0 / self.norm_factor)) qmk = self.tril_softmax_dropout(qmk) # w * v: batch_matmul # shape sign: (b, n, s, s) x (b, n, s, h) -> (b, n, s, h) # sbp sign: [S(0), S(1)] x [S(0), S(1)] -> [S(0), S(1)] return flow._C.matmul(qmk, v) def tril_softmax_dropout(self, x): if self.scale_tril_softmax_dropout_fusion: x = flow._C.fused_scale_tril_softmax_dropout( x, diagonal=0, scale=self.coeff, fill_value=float("-inf"), rate=self.attention_dropout_rate, ) else: x = flow._C.fused_scale_tril(x, fill_value=float("-inf"), scale=self.coeff) x = flow._C.softmax(x, dim=x.ndim - 1) # x = flow._C.softmax(x) x = self.multihead_attn_dropout(x) return x def fused_multihead_attn(self, h): qmk, v = flow._C.fused_self_attention_query_mul_key_and_value( h, head_size=self.head_size, alpha=(1.0 / self.norm_factor) ) qmk = self.tril_softmax_dropout(qmk) return flow._C.matmul(qmk, v) def forward(self, hidden_states): # hidden_states shape: (batch_size, seq_len, hidden_size) # or (seq_len, batch_size, hidden_size) [seq_len dim leading] # sbp: [S(0), B] assert hidden_states.shape[-1] == self.hidden_size h = self.c_attn(hidden_states) if self.multihead_attention_fusion and self.is_seq_len_dim_leading: h = self.fused_multihead_attn(h) else: q, k, v = self.query_key_value(h) h = self.multihead_attn(q, k, v) if self.is_seq_len_dim_leading: # (batch_size, num_heads, seq_len, head_size) -> (seq_len, batch_size, num_heads, head_size) h = flow._C.transpose(h, perm=(2, 0, 1, 3)) else: # (batch_size, num_heads, seq_len, head_size) -> (batch_size, seq_len, num_heads, head_size) h = flow._C.transpose(h, perm=(0, 2, 1, 3)) h = self.c_proj(h.flatten(2)) return h class MLP(flow.nn.Module): def __init__( self, layer_idx, hidden_size, hidden_dropout_rate, init_method, output_layer_init_method, ): super().__init__() self.hidden_size = hidden_size self.c_fc = ColumnParallelLinear( layer_idx, self.hidden_size, self.hidden_size * 4, init_method, 
need_gelu=True,
        )
        self.c_proj = RowParallelLinear(
            layer_idx,
            self.hidden_size * 4,
            self.hidden_size,
            output_layer_init_method,
            dropout_rate=hidden_dropout_rate,
        )

    def forward(self, hidden_states):
        # hidden_states shape: (batch_size, seq_length, hidden_size)
        # sbp: [S(0), B]
        assert hidden_states.shape[-1] == self.hidden_size

        h = self.c_fc(hidden_states)
        h = self.c_proj(h)
        return h


class LayerNorm(flow.nn.Module):
    def __init__(
        self, layer_idx, normalized_shape, eps=1e-5,
    ):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.epsilon = eps

        self.beta = flow.nn.Parameter(
            flow.empty(
                normalized_shape,
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )
        flow.nn.init.zeros_(self.beta)

        self.gamma = flow.nn.Parameter(
            flow.empty(
                normalized_shape,
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )
        flow.nn.init.ones_(self.gamma)

    def forward(self, x):
        assert x.shape[-len(self.normalized_shape) :] == self.normalized_shape
        begin_norm_axis = x.ndim - len(self.normalized_shape)
        begin_params_axis = x.ndim - len(self.normalized_shape)
        y = flow._C.layer_norm_affine(
            x,
            self.gamma,
            self.beta,
            begin_norm_axis=begin_norm_axis,
            begin_params_axis=begin_params_axis,
            epsilon=self.epsilon,
        )
        return y


class ColumnParallelLinear(flow.nn.Module):
    def __init__(
        self, layer_idx, input_size, output_size, init_method, need_gelu=False
    ):
        super().__init__()
        self.need_gelu = need_gelu

        args = get_args()
        self.bias_gelu_fusion = args.bias_gelu_fusion

        # col parallel linear weight sbp: [B, S(1)]
        self.weight = flow.nn.Parameter(
            flow.empty(
                (input_size, output_size),
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(1)]),
            )
        )
        init_method(self.weight)

        # col parallel linear bias sbp: [B, S(0)]
        self.bias = flow.nn.Parameter(
            flow.empty(
                (output_size,),
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
            )
        )
        flow.nn.init.zeros_(self.bias)

    def forward(self, x):
        # x sbp: [S(0), B]
        # x.grad sbp: [S(0), P] -> [S(0), B]
        x = x.to_consistent(grad_sbp=x.sbp)
        # matmul sbp sign: [S(0), B] x [B, S(1)] -> [S(0), S(1)]
        # x.grad sbp sign: [S(0), S(1)] x [B, S(0)] (weight.T) -> [S(0), P]
        x = flow._C.matmul(x, self.weight)

        if self.need_gelu:
            if self.bias_gelu_fusion:
                x = flow._C.fused_bias_add_gelu(x, self.bias, axis=x.ndim - 1)
            else:
                x = x + self.bias
                x = flow._C.gelu(x)
        else:
            # broadcast_add shape sign:
            # (input_size, output_size) + (output_size, ) = (input_size, output_size)
            # bias_add sbp sign: [S(0), S(1)] + [B, S(0)] = [S(0), S(1)]
            x = x + self.bias

        return x


class RowParallelLinear(flow.nn.Module):
    def __init__(
        self, layer_idx, input_size, output_size, init_method, dropout_rate,
    ):
        super().__init__()
        self.dropout_rate = dropout_rate

        args = get_args()
        self.bias_dropout_fusion = args.bias_dropout_fusion
        if not self.bias_dropout_fusion:
            self.dropout = flow.nn.Dropout(p=dropout_rate)

        # row parallel linear weight sbp: [B, S(0)]
        self.weight = flow.nn.Parameter(
            flow.empty(
                (input_size, output_size),
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
            )
        )
        init_method(self.weight)

        # row parallel linear bias sbp: [B, B]
        self.bias = flow.nn.Parameter(
            flow.empty(
                (output_size,),
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) ) flow.nn.init.zeros_(self.bias) def forward(self, x): # x.sbp: [S(0), S(1)] # matmul sbp sign: [S(0), S(1)] x [B, S(0)] -> [S(0), P] # backward x.grad sbp sign: [S(0), B] x [B, S(1)] (weight.T) -> [S(0), S(1)] x = flow._C.matmul(x, self.weight) # x.sbp: [S(0), P] -> [S(0), B] x = x.to_consistent(sbp=dist.get_hidden_sbp()) if self.bias_dropout_fusion: x = flow._C.fused_bias_add_dropout( x, self.bias, p=self.dropout_rate, axis=x.ndim - 1 ) else: x = x + self.bias x = self.dropout(x) return x class ParallelSparseSoftmaxCrossEntropyLoss(flow.nn.Module): def __init__(self): super().__init__() def forward(self, logits, labels): # logits shape: (batch_size, seq_length, vocab_size) # sbp: [S(0), S(2)] # labels shape: (batch_size, seq_length) # sbp: [S(0), B] assert logits.ndim == 3 assert labels.ndim == 2 assert logits.shape[0:2] == labels.shape loss = flow._C.sparse_softmax_cross_entropy( logits.view(-1, logits.shape[-1]), labels.view(-1) ) if ( not logits.is_consistent or flow.sbp.split(logits.ndim - 1) not in logits.sbp ): loss = flow._C.amp_white_identity(loss) return loss.mean()
[ "oneflow._C.amp_white_identity", "oneflow._C.gelu", "oneflow._C.fused_bias_add_dropout", "oneflow._C.identity", "oneflow._C.fused_self_attention_query_mul_key_and_value", "oneflow.nn.init.ones_", "oneflow._C.layer_norm_affine", "oneflow._C.softmax", "oneflow._C.matmul", "oneflow._C.gather", "oneflow.nn.Dropout", "oneflow.nn.init.normal_", "oneflow.sbp.split", "oneflow.nn.init.zeros_", "oneflow._C.fused_bias_add_gelu", "oneflow._C.transpose" ]
[((263, 273), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (271, 273), False, 'from oneflow_gpt.config import get_args\n'), ((1773, 1811), 'oneflow._C.matmul', 'flow._C.matmul', (['h', 'w'], {'transpose_b': '(True)'}), '(h, w, transpose_b=True)\n', (1787, 1811), True, 'import oneflow as flow\n'), ((2083, 2093), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (2091, 2093), False, 'from oneflow_gpt.config import get_args\n'), ((2117, 2155), 'oneflow.nn.Dropout', 'flow.nn.Dropout', ([], {'p': 'args.hidden_dropout'}), '(p=args.hidden_dropout)\n', (2132, 2155), True, 'import oneflow as flow\n'), ((2993, 3049), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['self.wte'], {'std': 'args.init_method_std'}), '(self.wte, std=args.init_method_std)\n', (3013, 3049), True, 'import oneflow as flow\n'), ((3058, 3114), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['self.wpe'], {'std': 'args.init_method_std'}), '(self.wpe, std=args.init_method_std)\n', (3078, 3114), True, 'import oneflow as flow\n'), ((3813, 3848), 'oneflow._C.gather', 'flow._C.gather', (['wte', 'tokens'], {'axis': '(0)'}), '(wte, tokens, axis=0)\n', (3827, 3848), True, 'import oneflow as flow\n'), ((4412, 4461), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['tensor'], {'mean': '(0.0)', 'std': 'sigma'}), '(tensor, mean=0.0, std=sigma)\n', (4432, 4461), True, 'import oneflow as flow\n'), ((4612, 4639), 'math.sqrt', 'math.sqrt', (['(2.0 * num_layers)'], {}), '(2.0 * num_layers)\n', (4621, 4639), False, 'import math\n'), ((4679, 4726), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['tensor'], {'mean': '(0.0)', 'std': 'std'}), '(tensor, mean=0.0, std=std)\n', (4699, 4726), True, 'import oneflow as flow\n'), ((4901, 4911), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (4909, 4911), False, 'from oneflow_gpt.config import get_args\n'), ((6897, 6916), 'oneflow._C.identity', 'flow._C.identity', (['x'], {}), '(x)\n', (6913, 6916), True, 'import oneflow as flow\n'), ((7242, 7252), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (7250, 7252), False, 'from oneflow_gpt.config import get_args\n'), ((8586, 8596), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (8594, 8596), False, 'from oneflow_gpt.config import get_args\n'), ((11142, 11210), 'oneflow._C.matmul', 'flow._C.matmul', (['q', 'k'], {'transpose_b': '(True)', 'alpha': '(1.0 / self.norm_factor)'}), '(q, k, transpose_b=True, alpha=1.0 / self.norm_factor)\n', (11156, 11210), True, 'import oneflow as flow\n'), ((11433, 11455), 'oneflow._C.matmul', 'flow._C.matmul', (['qmk', 'v'], {}), '(qmk, v)\n', (11447, 11455), True, 'import oneflow as flow\n'), ((12104, 12220), 'oneflow._C.fused_self_attention_query_mul_key_and_value', 'flow._C.fused_self_attention_query_mul_key_and_value', (['h'], {'head_size': 'self.head_size', 'alpha': '(1.0 / self.norm_factor)'}), '(h, head_size=self.\n head_size, alpha=1.0 / self.norm_factor)\n', (12156, 12220), True, 'import oneflow as flow\n'), ((12300, 12322), 'oneflow._C.matmul', 'flow._C.matmul', (['qmk', 'v'], {}), '(qmk, v)\n', (12314, 12322), True, 'import oneflow as flow\n'), ((14753, 14783), 'oneflow.nn.init.zeros_', 'flow.nn.init.zeros_', (['self.beta'], {}), '(self.beta)\n', (14772, 14783), True, 'import oneflow as flow\n'), ((15093, 15123), 'oneflow.nn.init.ones_', 'flow.nn.init.ones_', (['self.gamma'], {}), '(self.gamma)\n', (15111, 15123), True, 'import oneflow as flow\n'), ((15368, 15516), 'oneflow._C.layer_norm_affine', 'flow._C.layer_norm_affine', (['x', 
'self.gamma', 'self.beta'], {'begin_norm_axis': 'begin_norm_axis', 'begin_params_axis': 'begin_params_axis', 'epsilon': 'self.epsilon'}), '(x, self.gamma, self.beta, begin_norm_axis=\n begin_norm_axis, begin_params_axis=begin_params_axis, epsilon=self.epsilon)\n', (15393, 15516), True, 'import oneflow as flow\n'), ((15840, 15850), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (15848, 15850), False, 'from oneflow_gpt.config import get_args\n'), ((16655, 16685), 'oneflow.nn.init.zeros_', 'flow.nn.init.zeros_', (['self.bias'], {}), '(self.bias)\n', (16674, 16685), True, 'import oneflow as flow\n'), ((16982, 17012), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'self.weight'], {}), '(x, self.weight)\n', (16996, 17012), True, 'import oneflow as flow\n'), ((17735, 17745), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (17743, 17745), False, 'from oneflow_gpt.config import get_args\n'), ((18654, 18684), 'oneflow.nn.init.zeros_', 'flow.nn.init.zeros_', (['self.bias'], {}), '(self.bias)\n', (18673, 18684), True, 'import oneflow as flow\n'), ((18904, 18934), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'self.weight'], {}), '(x, self.weight)\n', (18918, 18934), True, 'import oneflow as flow\n'), ((3348, 3384), 'oneflow._C.amp_white_identity', 'flow._C.amp_white_identity', (['self.wte'], {}), '(self.wte)\n', (3374, 3384), True, 'import oneflow as flow\n'), ((3403, 3439), 'oneflow._C.amp_white_identity', 'flow._C.amp_white_identity', (['self.wpe'], {}), '(self.wpe)\n', (3429, 3439), True, 'import oneflow as flow\n'), ((9038, 9084), 'oneflow.nn.Dropout', 'flow.nn.Dropout', ([], {'p': 'self.attention_dropout_rate'}), '(p=self.attention_dropout_rate)\n', (9053, 9084), True, 'import oneflow as flow\n'), ((10648, 10737), 'oneflow._C.transpose', 'flow._C.transpose', (['h[:, :, :, i * self.head_size:(i + 1) * self.head_size]'], {'perm': 'perm'}), '(h[:, :, :, i * self.head_size:(i + 1) * self.head_size],\n perm=perm)\n', (10665, 10737), True, 'import oneflow as flow\n'), ((11910, 11944), 'oneflow._C.softmax', 'flow._C.softmax', (['x'], {'dim': '(x.ndim - 1)'}), '(x, dim=x.ndim - 1)\n', (11925, 11944), True, 'import oneflow as flow\n'), ((13011, 13050), 'oneflow._C.transpose', 'flow._C.transpose', (['h'], {'perm': '(2, 0, 1, 3)'}), '(h, perm=(2, 0, 1, 3))\n', (13028, 13050), True, 'import oneflow as flow\n'), ((13186, 13225), 'oneflow._C.transpose', 'flow._C.transpose', (['h'], {'perm': '(0, 2, 1, 3)'}), '(h, perm=(0, 2, 1, 3))\n', (13203, 13225), True, 'import oneflow as flow\n'), ((17874, 17905), 'oneflow.nn.Dropout', 'flow.nn.Dropout', ([], {'p': 'dropout_rate'}), '(p=dropout_rate)\n', (17889, 17905), True, 'import oneflow as flow\n'), ((19083, 19170), 'oneflow._C.fused_bias_add_dropout', 'flow._C.fused_bias_add_dropout', (['x', 'self.bias'], {'p': 'self.dropout_rate', 'axis': '(x.ndim - 1)'}), '(x, self.bias, p=self.dropout_rate, axis=x.\n ndim - 1)\n', (19113, 19170), True, 'import oneflow as flow\n'), ((19993, 20025), 'oneflow._C.amp_white_identity', 'flow._C.amp_white_identity', (['loss'], {}), '(loss)\n', (20019, 20025), True, 'import oneflow as flow\n'), ((3995, 4016), 'oneflow_gpt.distribute.get_hidden_sbp', 'dist.get_hidden_sbp', ([], {}), '()\n', (4014, 4016), True, 'from oneflow_gpt import distribute as dist\n'), ((6840, 6880), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['self.layer_idx'], {}), '(self.layer_idx)\n', (6864, 6880), True, 'from oneflow_gpt import distribute as dist\n'), ((17098, 17156), 'oneflow._C.fused_bias_add_gelu', 
'flow._C.fused_bias_add_gelu', (['x', 'self.bias'], {'axis': '(x.ndim - 1)'}), '(x, self.bias, axis=x.ndim - 1)\n', (17125, 17156), True, 'import oneflow as flow\n'), ((17229, 17244), 'oneflow._C.gelu', 'flow._C.gelu', (['x'], {}), '(x)\n', (17241, 17244), True, 'import oneflow as flow\n'), ((19007, 19028), 'oneflow_gpt.distribute.get_hidden_sbp', 'dist.get_hidden_sbp', ([], {}), '()\n', (19026, 19028), True, 'from oneflow_gpt import distribute as dist\n'), ((19913, 19944), 'oneflow.sbp.split', 'flow.sbp.split', (['(logits.ndim - 1)'], {}), '(logits.ndim - 1)\n', (19927, 19944), True, 'import oneflow as flow\n'), ((2458, 2485), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['(0)'], {}), '(0)\n', (2482, 2485), True, 'from oneflow_gpt import distribute as dist\n'), ((2852, 2879), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['(0)'], {}), '(0)\n', (2876, 2879), True, 'from oneflow_gpt import distribute as dist\n'), ((2901, 2958), 'oneflow_gpt.distribute.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (2916, 2958), True, 'from oneflow_gpt import distribute as dist\n'), ((14605, 14640), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (14629, 14640), True, 'from oneflow_gpt import distribute as dist\n'), ((14662, 14719), 'oneflow_gpt.distribute.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (14677, 14719), True, 'from oneflow_gpt import distribute as dist\n'), ((14945, 14980), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (14969, 14980), True, 'from oneflow_gpt import distribute as dist\n'), ((15002, 15059), 'oneflow_gpt.distribute.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (15017, 15059), True, 'from oneflow_gpt import distribute as dist\n'), ((16128, 16163), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (16152, 16163), True, 'from oneflow_gpt import distribute as dist\n'), ((16508, 16543), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (16532, 16543), True, 'from oneflow_gpt import distribute as dist\n'), ((18129, 18164), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (18153, 18164), True, 'from oneflow_gpt import distribute as dist\n'), ((18506, 18541), 'oneflow_gpt.distribute.get_layer_placement', 'dist.get_layer_placement', (['layer_idx'], {}), '(layer_idx)\n', (18530, 18541), True, 'from oneflow_gpt import distribute as dist\n'), ((18563, 18620), 'oneflow_gpt.distribute.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (18578, 18620), True, 'from oneflow_gpt import distribute as dist\n'), ((2544, 2561), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (2558, 2561), True, 'import oneflow as flow\n'), ((16222, 16239), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (16236, 16239), True, 'import oneflow as flow\n'), ((16602, 16619), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (16616, 16619), True, 'import oneflow as flow\n'), ((18223, 
18240), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (18237, 18240), True, 'import oneflow as flow\n')]
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import numpy as np import oneflow as flow import oneflow.unittest @flow.unittest.skip_unless_1n1d() class TestDot(flow.unittest.TestCase): def test_dot_shape_error_msg(test_case): with test_case.assertRaises(RuntimeError) as exp: a = flow.tensor([2, 3]) b = flow.tensor([2, 3, 4]) flow.dot(a, b) test_case.assertTrue("inconsistent tensor size" in str(exp.exception)) def test_dot_dims_error_msg(test_case): with test_case.assertRaises(RuntimeError) as exp: a = flow.tensor([[2, 3], [3, 4]]) flow.dot(a, a) test_case.assertTrue("1D tensors expected" in str(exp.exception)) def test_dot_dtype_error_msg(test_case): with test_case.assertRaises(RuntimeError) as exp: a = flow.tensor([2, 3], dtype=flow.int64) b = flow.tensor([2, 3], dtype=flow.float32) flow.dot(a, b) test_case.assertTrue( "expected both vectors to have same dtype" in str(exp.exception) ) if __name__ == "__main__": unittest.main()
[ "oneflow.unittest.skip_unless_1n1d", "oneflow.tensor", "oneflow.dot" ]
[((675, 707), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (705, 707), True, 'import oneflow as flow\n'), ((1672, 1687), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1685, 1687), False, 'import unittest\n'), ((866, 885), 'oneflow.tensor', 'flow.tensor', (['[2, 3]'], {}), '([2, 3])\n', (877, 885), True, 'import oneflow as flow\n'), ((902, 924), 'oneflow.tensor', 'flow.tensor', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (913, 924), True, 'import oneflow as flow\n'), ((937, 951), 'oneflow.dot', 'flow.dot', (['a', 'b'], {}), '(a, b)\n', (945, 951), True, 'import oneflow as flow\n'), ((1150, 1179), 'oneflow.tensor', 'flow.tensor', (['[[2, 3], [3, 4]]'], {}), '([[2, 3], [3, 4]])\n', (1161, 1179), True, 'import oneflow as flow\n'), ((1192, 1206), 'oneflow.dot', 'flow.dot', (['a', 'a'], {}), '(a, a)\n', (1200, 1206), True, 'import oneflow as flow\n'), ((1401, 1438), 'oneflow.tensor', 'flow.tensor', (['[2, 3]'], {'dtype': 'flow.int64'}), '([2, 3], dtype=flow.int64)\n', (1412, 1438), True, 'import oneflow as flow\n'), ((1455, 1494), 'oneflow.tensor', 'flow.tensor', (['[2, 3]'], {'dtype': 'flow.float32'}), '([2, 3], dtype=flow.float32)\n', (1466, 1494), True, 'import oneflow as flow\n'), ((1507, 1521), 'oneflow.dot', 'flow.dot', (['a', 'b'], {}), '(a, b)\n', (1515, 1521), True, 'import oneflow as flow\n')]
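The three tests above pin down flow.dot's error contract: operands must have equal sizes, be 1-D, and share a dtype. For contrast, a call that satisfies all three constraints, using only the APIs the test itself imports:

import oneflow as flow

a = flow.tensor([1.0, 2.0, 3.0], dtype=flow.float32)
b = flow.tensor([4.0, 5.0, 6.0], dtype=flow.float32)
print(flow.dot(a, b))  # 1*4 + 2*5 + 3*6 = 32.0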
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import typing from google.protobuf import text_format import oneflow._oneflow_internal from oneflow.compatible import single_client as flow from oneflow.compatible.single_client.framework import c_api_util as c_api_util from oneflow.compatible.single_client.framework import session_context as session_ctx from oneflow.core.job import job_conf_pb2 as job_conf_pb from oneflow.core.job import sbp_parallel_pb2 as sbp_parallel_pb from oneflow.core.operator import interface_blob_conf_pb2 as interface_blob_conf_pb from oneflow.core.register import logical_blob_id_pb2 as logical_blob_id_pb from oneflow.core.serving import saved_model_pb2 as saved_model_pb class ModelBuilder(object): DEFAULT_CHECKPOINT_DIR = "variables" DEFAULT_SAVED_MODEL_FILE_BASENAME = "saved_model" def __init__(self, save_path: str): if not isinstance(save_path, str): raise ValueError( "param 'save_path' must be str, but got {}".format(save_path) ) self.version_ = None self.checkpoint_dir_ = self.DEFAULT_CHECKPOINT_DIR self.saved_model_dir_ = save_path self.saved_model_pb_filename_ = "{}.pb".format( self.DEFAULT_SAVED_MODEL_FILE_BASENAME ) self.saved_model_pbtxt_filename_ = "{}.prototxt".format( self.DEFAULT_SAVED_MODEL_FILE_BASENAME ) self.saved_model_proto_ = saved_model_pb.SavedModel() self.graph_builders_ = {} @property def proto(self): return self.saved_model_proto_ def ModelName(self, model_name: str): assert isinstance(model_name, str) self.proto.name = model_name return self def Version(self, version: int): assert isinstance(version, int) self.version_ = version return self def AddFunction(self, func): func_name = func.__name__ if func_name in self.graph_builders_: raise ValueError("function with name {} already exists".format(func_name)) graph_builder = GraphBuilder(func_name, self) self.graph_builders_[func_name] = graph_builder if not self.proto.HasField("default_graph_name"): self.proto.default_graph_name = func_name return graph_builder def _check_input_output_name_conflict(self): name_set = set() lbn_set = set() def check_name_conflict(name, interface_def): if name in name_set: raise ValueError("input conflict, {} already exist".format(name)) name_set.add(name) lbn = Lbi2Lbn(interface_def.lbi) if lbn in lbn_set: raise ValueError( "input conflict, {} already bind to other input".format(lbn) ) lbn_set.add(lbn) for (_, graph_def) in self.proto.graphs.items(): for (_, signature_def) in graph_def.signatures.items(): for (input_name, input_def) in signature_def.inputs.items(): check_name_conflict(input_name, input_def) for (output_name, output_def) in signature_def.outputs.items(): check_name_conflict(output_name, output_def) @session_ctx.try_init_default_session def Save(self, save_model_before_graph_complete: bool = True): self._check_input_output_name_conflict() for (_, graph_builder) in self.graph_builders_.items(): if not graph_builder.finished: graph_builder.Finish() sess = session_ctx.GetDefaultSession() for (graph_name, graph_def) 
in self.proto.graphs.items(): job = sess.Job( graph_name if save_model_before_graph_complete else graph_name + "_after_complete" ) graph_def.op_list.extend(list(job.net.op)) if not os.path.exists(self.saved_model_dir_): os.makedirs(self.saved_model_dir_) if self.version_ is None: raise ValueError("model version is not set") version_dir = os.path.join(self.saved_model_dir_, str(self.version_)) if os.path.exists(version_dir): raise ValueError( 'Directory of model "{}" version "{}" already exist.'.format( self.saved_model_dir_, self.version_ ) ) os.makedirs(version_dir) self.proto.version = self.version_ checkpoint_path = os.path.join(version_dir, self.checkpoint_dir_) flow.checkpoint.save(checkpoint_path) self.proto.checkpoint_dir = self.checkpoint_dir_ saved_model_pb_path = os.path.join(version_dir, self.saved_model_pb_filename_) with open(saved_model_pb_path, "wb") as writer: writer.write(self.saved_model_proto_.SerializeToString()) saved_model_pbtxt_path = os.path.join( version_dir, self.saved_model_pbtxt_filename_ ) with open(saved_model_pbtxt_path, "wt") as writer: writer.write(text_format.MessageToString(self.saved_model_proto_)) class GraphBuilder(object): def __init__(self, name: str, model_builder: typing.Optional[ModelBuilder] = None): if not isinstance(name, str): raise ValueError("param 'name' must be str, but got {}".format(name)) if not isinstance(model_builder, ModelBuilder) and model_builder is not None: raise ValueError( "param 'model_builder' must be a type of ModelBuilder or None" ) if model_builder is not None: if name in model_builder.proto.graphs: raise ValueError( "graph function ({}) is already added to model ({})".format( name, model_builder.proto.name ) ) self.proto_ = model_builder.proto.graphs[name] self.owner_ = model_builder else: self.proto_ = saved_model_pb.GraphDef() self.owner_ = None self.name_ = name self.finished_ = False self.signature_builders_ = {} @property def name(self): return self.name_ @property def proto(self): return self.proto_ @property def finished(self): return self.finished_ def AddSignature(self, signature_name: str): assert isinstance(signature_name, str) if signature_name in self.signature_builders_: raise ValueError("signature name {} already exists".format(signature_name)) signature_builder = SignatureBuilder(signature_name, self) self.signature_builders_[signature_name] = signature_builder if not self.proto.HasField("default_signature_name"): self.proto.default_signature_name = signature_name return signature_builder def Finish(self): assert self.finished is False for (_, signature_def) in self.proto.signatures.items(): for (_, input_def) in signature_def.inputs.items(): input_lbn = Lbi2Lbn(input_def.lbi) oneflow._oneflow_internal.JobBuildAndInferCtx_CheckLbnValidAndExist( self.name, input_lbn ) GetInterfaceBlobConf(self.name, input_lbn, input_def.blob_conf) for (_, output_def) in signature_def.outputs.items(): oneflow._oneflow_internal.JobBuildAndInferCtx_CheckLbnValidAndExist( self.name, Lbi2Lbn(output_def.lbi) ) self.finished_ = True def OwnerModelBuilder(self): return self.owner_ def AsDefault(self): if self.owner_ is not None: self.owner_.proto.default_graph_name = self.name return self class SignatureBuilder(object): def __init__(self, name: str, graph_builder: typing.Optional[GraphBuilder] = None): if not isinstance(name, str): raise ValueError("param 'name' must be str, but got {}".format(name)) if not isinstance(graph_builder, GraphBuilder) and graph_builder is not None: raise ValueError( "param 'graph_builder' must be a 
type of GraphBuilder or None" ) if graph_builder is not None: if name in graph_builder.proto.signatures: raise ValueError( "signature ({}) already exist in graph ({})".format( name, graph_builder.name ) ) self.proto_ = graph_builder.proto.signatures[name] self.owner_ = graph_builder else: self.proto_ = job_conf_pb.JobSignatureDef() self.owner_ = None self.name_ = name @property def name(self): return self.name_ @property def proto(self): return self.proto_ def Input(self, input_name: str, lbn: str): assert isinstance(input_name, str) assert isinstance(lbn, str) assert "/" in lbn if input_name in self.proto.inputs: raise ValueError( "input_name ({}) already exist in signature ({}) of graph ({})".format( input_name, self.name, self.graph_builder_.name ) ) input_def = self.proto.inputs[input_name] Lbn2Lbi(lbn, input_def.lbi) return self def Output(self, output_name: str, lbn: str): assert isinstance(output_name, str) assert isinstance(lbn, str) assert "/" in lbn if output_name in self.proto.outputs: raise ValueError( "output_name ({}) already exist in signature ({}) of graph ({})".format( output_name, self.name, self.graph_builder_.name ) ) output_def = self.proto.outputs[output_name] Lbn2Lbi(lbn, output_def.lbi) return self def OwnerGraphBuilder(self): return self.owner_ def AsDefault(self): if self.owner_ is not None: self.owner_.proto.default_signature_name = self.name return self def GetInterfaceBlobConf(job_name, lbn, blob_conf=None): assert isinstance(job_name, str) assert isinstance(lbn, str) if blob_conf is None: blob_conf = interface_blob_conf_pb.InterfaceBlobConf() else: assert isinstance(blob_conf, interface_blob_conf_pb.InterfaceBlobConf) shape = c_api_util.JobBuildAndInferCtx_GetStaticShape(job_name, lbn) dtype = c_api_util.JobBuildAndInferCtx_GetDataType(job_name, lbn) split_axis = c_api_util.JobBuildAndInferCtx_GetSplitAxisFromProducerView( job_name, lbn ) is_dynamic = c_api_util.JobBuildAndInferCtx_IsDynamic(job_name, lbn) blob_conf.shape.dim.extend(shape) blob_conf.data_type = dtype if split_axis is not None: sbp_parallel = sbp_parallel_pb.SbpParallel() sbp_parallel.split_parallel.axis = split_axis blob_conf.parallel_distribution.sbp_parallel.extend([sbp_parallel]) blob_conf.is_dynamic = is_dynamic return blob_conf def Lbn2Lbi(lbn, lbi=None): assert isinstance(lbn, str) assert "/" in lbn, 'invalid lbn "{}"'.format(lbn) [op_name, blob_name] = lbn.split("/") if lbi is None: lbi = logical_blob_id_pb.LogicalBlobId() lbi.op_name = op_name lbi.blob_name = blob_name return lbi def Lbi2Lbn(lbi): assert isinstance(lbi, logical_blob_id_pb.LogicalBlobId) return "{}/{}".format(lbi.op_name, lbi.blob_name)
[ "oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_GetDataType", "oneflow.compatible.single_client.framework.session_context.GetDefaultSession", "oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_GetStaticShape", "oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_GetSplitAxisFromProducerView", "oneflow.core.serving.saved_model_pb2.SavedModel", "oneflow.compatible.single_client.checkpoint.save", "oneflow.core.operator.interface_blob_conf_pb2.InterfaceBlobConf", "oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_IsDynamic", "oneflow.core.register.logical_blob_id_pb2.LogicalBlobId", "oneflow.core.job.sbp_parallel_pb2.SbpParallel", "oneflow.core.job.job_conf_pb2.JobSignatureDef", "oneflow.core.serving.saved_model_pb2.GraphDef" ]
[((11040, 11100), 'oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_GetStaticShape', 'c_api_util.JobBuildAndInferCtx_GetStaticShape', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (11085, 11100), True, 'from oneflow.compatible.single_client.framework import c_api_util as c_api_util\n'), ((11113, 11170), 'oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_GetDataType', 'c_api_util.JobBuildAndInferCtx_GetDataType', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (11155, 11170), True, 'from oneflow.compatible.single_client.framework import c_api_util as c_api_util\n'), ((11188, 11262), 'oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_GetSplitAxisFromProducerView', 'c_api_util.JobBuildAndInferCtx_GetSplitAxisFromProducerView', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (11247, 11262), True, 'from oneflow.compatible.single_client.framework import c_api_util as c_api_util\n'), ((11294, 11349), 'oneflow.compatible.single_client.framework.c_api_util.JobBuildAndInferCtx_IsDynamic', 'c_api_util.JobBuildAndInferCtx_IsDynamic', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (11334, 11349), True, 'from oneflow.compatible.single_client.framework import c_api_util as c_api_util\n'), ((1995, 2022), 'oneflow.core.serving.saved_model_pb2.SavedModel', 'saved_model_pb.SavedModel', ([], {}), '()\n', (2020, 2022), True, 'from oneflow.core.serving import saved_model_pb2 as saved_model_pb\n'), ((4126, 4157), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4155, 4157), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((4733, 4760), 'os.path.exists', 'os.path.exists', (['version_dir'], {}), '(version_dir)\n', (4747, 4760), False, 'import os\n'), ((4967, 4991), 'os.makedirs', 'os.makedirs', (['version_dir'], {}), '(version_dir)\n', (4978, 4991), False, 'import os\n'), ((5061, 5108), 'os.path.join', 'os.path.join', (['version_dir', 'self.checkpoint_dir_'], {}), '(version_dir, self.checkpoint_dir_)\n', (5073, 5108), False, 'import os\n'), ((5117, 5154), 'oneflow.compatible.single_client.checkpoint.save', 'flow.checkpoint.save', (['checkpoint_path'], {}), '(checkpoint_path)\n', (5137, 5154), True, 'from oneflow.compatible import single_client as flow\n'), ((5242, 5298), 'os.path.join', 'os.path.join', (['version_dir', 'self.saved_model_pb_filename_'], {}), '(version_dir, self.saved_model_pb_filename_)\n', (5254, 5298), False, 'import os\n'), ((5458, 5517), 'os.path.join', 'os.path.join', (['version_dir', 'self.saved_model_pbtxt_filename_'], {}), '(version_dir, self.saved_model_pbtxt_filename_)\n', (5470, 5517), False, 'import os\n'), ((10896, 10938), 'oneflow.core.operator.interface_blob_conf_pb2.InterfaceBlobConf', 'interface_blob_conf_pb.InterfaceBlobConf', ([], {}), '()\n', (10936, 10938), True, 'from oneflow.core.operator import interface_blob_conf_pb2 as interface_blob_conf_pb\n'), ((11474, 11503), 'oneflow.core.job.sbp_parallel_pb2.SbpParallel', 'sbp_parallel_pb.SbpParallel', ([], {}), '()\n', (11501, 11503), True, 'from oneflow.core.job import sbp_parallel_pb2 as sbp_parallel_pb\n'), ((11885, 11919), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_pb.LogicalBlobId', ([], {}), '()\n', (11917, 11919), True, 'from oneflow.core.register import logical_blob_id_pb2 as logical_blob_id_pb\n'), ((4467, 4504), 'os.path.exists', 'os.path.exists', (['self.saved_model_dir_'], {}), 
'(self.saved_model_dir_)\n', (4481, 4504), False, 'import os\n'), ((4518, 4552), 'os.makedirs', 'os.makedirs', (['self.saved_model_dir_'], {}), '(self.saved_model_dir_)\n', (4529, 4552), False, 'import os\n'), ((6563, 6588), 'oneflow.core.serving.saved_model_pb2.GraphDef', 'saved_model_pb.GraphDef', ([], {}), '()\n', (6586, 6588), True, 'from oneflow.core.serving import saved_model_pb2 as saved_model_pb\n'), ((9248, 9277), 'oneflow.core.job.job_conf_pb2.JobSignatureDef', 'job_conf_pb.JobSignatureDef', ([], {}), '()\n', (9275, 9277), True, 'from oneflow.core.job import job_conf_pb2 as job_conf_pb\n'), ((5624, 5676), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['self.saved_model_proto_'], {}), '(self.saved_model_proto_)\n', (5651, 5676), False, 'from google.protobuf import text_format\n')]
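The builder classes above are designed for call chaining: each setter returns self or the next builder down. A sketch of a complete export; `inference` stands for any compiled @flow.global_function the caller has defined, and the "op_name/blob_name" lbn strings are placeholders that must name real blobs in that job:

# Hypothetical usage of the builder classes defined above.
builder = ModelBuilder("/tmp/exported_model")  # assumed save path
builder.ModelName("demo").Version(1)
(
    builder.AddFunction(inference)       # -> GraphBuilder
    .AddSignature("predict")             # -> SignatureBuilder
    .Input("image", "input_conv/out")    # placeholder lbn
    .Output("feature", "fc_head/out")    # placeholder lbn
    .AsDefault()
)
builder.Save()

Per the constants at the top of ModelBuilder, Save() then writes the checkpoint under <save_path>/1/variables, plus saved_model.pb and saved_model.prototxt beside it.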
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import oneflow as flow import os @unittest.skipIf(flow.sysconfig.has_rpc_backend_grpc() == False, "lacks grpc") @flow.unittest.skip_unless_1n4d() @unittest.skipIf( os.getenv("ONEFLOW_TEST_GITHUB_HOSTED"), "this will fail because github hosted VM has only two CPU cores", ) class TestMultiProcess(flow.unittest.TestCase): def test_multi_process(test_case): flow.config.gpu_device_num(4) func_config = flow.FunctionConfig() func_config.concurrency_width(1) @flow.global_function() def Foo(): with flow.scope.placement("gpu", "0:0-3"): x = flow.get_variable( "x", shape=(2, 5), dtype=flow.float, initializer=flow.random_uniform_initializer(minval=0, maxval=1), trainable=False, ) return x of_ret = Foo().get() test_case.assertEqual(of_ret.numpy().shape, (2, 5)) def test_worker_to_master_communication(test_case): flow.config.gpu_device_num(4) func_config = flow.FunctionConfig() func_config.concurrency_width(1) @flow.global_function() def Foo(): with flow.scope.placement("gpu", "0:0"): x = flow.get_variable( "x", shape=(2, 5), dtype=flow.float, initializer=flow.random_uniform_initializer(minval=0, maxval=1), trainable=False, ) with flow.scope.placement("gpu", "0:3"): y = flow.get_variable( "y", shape=(2, 5), dtype=flow.float, initializer=flow.constant_initializer(0), trainable=False, ) flow.assign(y, x) return y of_ret = Foo().get() test_case.assertEqual(of_ret.numpy().shape, (2, 5)) def test_worker_to_worker_communication(test_case): flow.config.gpu_device_num(4) func_config = flow.FunctionConfig() func_config.concurrency_width(1) @flow.global_function() def Foo(): with flow.scope.placement("gpu", "0:1"): x = flow.get_variable( "x", shape=(2, 5), dtype=flow.float, initializer=flow.random_uniform_initializer(minval=0, maxval=1), trainable=False, ) with flow.scope.placement("gpu", "0:2"): y = flow.get_variable( "y", shape=(2, 5), dtype=flow.float, initializer=flow.constant_initializer(0), trainable=False, ) flow.assign(y, x) return y of_ret = Foo().get() test_case.assertEqual(of_ret.numpy().shape, (2, 5)) if __name__ == "__main__": unittest.main()
[ "oneflow.FunctionConfig", "oneflow.assign", "oneflow.sysconfig.has_rpc_backend_grpc", "oneflow.random_uniform_initializer", "oneflow.constant_initializer", "oneflow.unittest.skip_unless_1n4d", "oneflow.global_function", "oneflow.scope.placement", "oneflow.config.gpu_device_num" ]
[((721, 753), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (751, 753), True, 'import oneflow as flow\n'), ((776, 815), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_GITHUB_HOSTED"""'], {}), "('ONEFLOW_TEST_GITHUB_HOSTED')\n", (785, 815), False, 'import os\n'), ((3650, 3665), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3663, 3665), False, 'import unittest\n'), ((984, 1013), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(4)'], {}), '(4)\n', (1010, 1013), True, 'import oneflow as flow\n'), ((1036, 1057), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1055, 1057), True, 'import oneflow as flow\n'), ((1109, 1131), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1129, 1131), True, 'import oneflow as flow\n'), ((1658, 1687), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(4)'], {}), '(4)\n', (1684, 1687), True, 'import oneflow as flow\n'), ((1710, 1731), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1729, 1731), True, 'import oneflow as flow\n'), ((1783, 1805), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1803, 1805), True, 'import oneflow as flow\n'), ((2670, 2699), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(4)'], {}), '(4)\n', (2696, 2699), True, 'import oneflow as flow\n'), ((2722, 2743), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2741, 2743), True, 'import oneflow as flow\n'), ((2795, 2817), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (2815, 2817), True, 'import oneflow as flow\n'), ((658, 695), 'oneflow.sysconfig.has_rpc_backend_grpc', 'flow.sysconfig.has_rpc_backend_grpc', ([], {}), '()\n', (693, 695), True, 'import oneflow as flow\n'), ((1168, 1204), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-3"""'], {}), "('gpu', '0:0-3')\n", (1188, 1204), True, 'import oneflow as flow\n'), ((1842, 1876), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (1862, 1876), True, 'import oneflow as flow\n'), ((2171, 2205), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:3"""'], {}), "('gpu', '0:3')\n", (2191, 2205), True, 'import oneflow as flow\n'), ((2476, 2493), 'oneflow.assign', 'flow.assign', (['y', 'x'], {}), '(y, x)\n', (2487, 2493), True, 'import oneflow as flow\n'), ((2854, 2888), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:1"""'], {}), "('gpu', '0:1')\n", (2874, 2888), True, 'import oneflow as flow\n'), ((3183, 3217), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:2"""'], {}), "('gpu', '0:2')\n", (3203, 3217), True, 'import oneflow as flow\n'), ((3488, 3505), 'oneflow.assign', 'flow.assign', (['y', 'x'], {}), '(y, x)\n', (3499, 3505), True, 'import oneflow as flow\n'), ((1374, 1425), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(1)'}), '(minval=0, maxval=1)\n', (1405, 1425), True, 'import oneflow as flow\n'), ((2046, 2097), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(1)'}), '(minval=0, maxval=1)\n', (2077, 2097), True, 'import oneflow as flow\n'), ((2375, 2403), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2400, 2403), True, 'import oneflow as flow\n'), ((3058, 3109), 'oneflow.random_uniform_initializer', 
'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(1)'}), '(minval=0, maxval=1)\n', (3089, 3109), True, 'import oneflow as flow\n'), ((3387, 3415), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (3412, 3415), True, 'import oneflow as flow\n')]
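The placement strings in the record above follow a "<machine>:<device or range>" convention: "0:0" is GPU 0 on machine 0, "0:0-3" spans GPUs 0 through 3, and flow.assign(y, x) across two placements is what forces the cross-device copy the tests exercise. A hypothetical pure-Python parser (not part of OneFlow, which parses these strings internally) just to make the grammar explicit:

def parse_placement(device_tag, machine_device_ids):
    # Illustrative helper only; mirrors the "<machine>:<lo>-<hi>" strings above.
    machine, devices = machine_device_ids.split(":")
    lo, _, hi = devices.partition("-")
    ids = list(range(int(lo), int(hi or lo) + 1))
    return {"tag": device_tag, "machine": int(machine), "device_ids": ids}

assert parse_placement("gpu", "0:0-3") == {"tag": "gpu", "machine": 0, "device_ids": [0, 1, 2, 3]}
assert parse_placement("gpu", "0:3") == {"tag": "gpu", "machine": 0, "device_ids": [3]}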
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict import numpy as np import oneflow.experimental as flow from test_util import GenArgList def _test_less_normal(test_case, device): input1 = flow.Tensor( np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device) ) input2 = flow.Tensor( np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device) ) of_out = flow.lt(input1, input2) np_out = np.less(input1.numpy(), input2.numpy()) test_case.assertTrue(np.array_equal(of_out.numpy(), np_out)) def _test_less_symbol(test_case, device): input1 = flow.Tensor( np.array([1, 1, 4]).astype(np.float32), dtype=flow.float32, device=flow.device(device), ) input2 = flow.Tensor( np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32, device=flow.device(device), ) of_out = input1 < input2 np_out = np.less(input1.numpy(), input2.numpy()) test_case.assertTrue(np.array_equal(of_out.numpy(), np_out)) def _test_less_int_scalar(test_case, device): np_arr = np.random.randn(2, 3, 4, 5) input1 = flow.Tensor(np_arr, dtype=flow.float32, device=flow.device(device)) input2 = 1 of_out = input1 < input2 np_out = np.less(np_arr, input2) test_case.assertTrue(np.array_equal(of_out.numpy(), np_out)) def _test_less_int_tensor_int_scalr(test_case, device): np_arr = np.random.randint(2, size=(2, 3, 4, 5)) input1 = flow.Tensor(np_arr, dtype=flow.int, device=flow.device(device)) input2 = 1 of_out = input1 < input2 np_out = np.less(np_arr, input2) test_case.assertTrue(np.array_equal(of_out.numpy(), np_out)) def _test_less_float_scalar(test_case, device): np_arr = np.random.randn(3, 2, 5, 7) input1 = flow.Tensor(np_arr, dtype=flow.float32, device=flow.device(device)) input2 = 2.3 of_out = input1 < input2 np_out = np.less(np_arr, input2) test_case.assertTrue(np.array_equal(of_out.numpy(), np_out)) @unittest.skipIf( not flow.unittest.env.eager_execution_enabled(), ".numpy() doesn't work in lazy mode", ) class TestLess(flow.unittest.TestCase): def test_less(test_case): arg_dict = OrderedDict() arg_dict["test_fun"] = [ _test_less_normal, _test_less_symbol, _test_less_int_scalar, _test_less_int_tensor_int_scalr, _test_less_float_scalar, ] arg_dict["device"] = ["cpu", "cuda"] for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) if __name__ == "__main__": unittest.main()
[ "oneflow.experimental.lt", "oneflow.experimental.unittest.env.eager_execution_enabled", "oneflow.experimental.device" ]
[((1021, 1044), 'oneflow.experimental.lt', 'flow.lt', (['input1', 'input2'], {}), '(input1, input2)\n', (1028, 1044), True, 'import oneflow.experimental as flow\n'), ((1703, 1730), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (1718, 1730), True, 'import numpy as np\n'), ((1869, 1892), 'numpy.less', 'np.less', (['np_arr', 'input2'], {}), '(np_arr, input2)\n', (1876, 1892), True, 'import numpy as np\n'), ((2029, 2068), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(2, 3, 4, 5)'}), '(2, size=(2, 3, 4, 5))\n', (2046, 2068), True, 'import numpy as np\n'), ((2203, 2226), 'numpy.less', 'np.less', (['np_arr', 'input2'], {}), '(np_arr, input2)\n', (2210, 2226), True, 'import numpy as np\n'), ((2355, 2382), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)', '(5)', '(7)'], {}), '(3, 2, 5, 7)\n', (2370, 2382), True, 'import numpy as np\n'), ((2523, 2546), 'numpy.less', 'np.less', (['np_arr', 'input2'], {}), '(np_arr, input2)\n', (2530, 2546), True, 'import numpy as np\n'), ((3213, 3228), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3226, 3228), False, 'import unittest\n'), ((810, 837), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (825, 837), True, 'import numpy as np\n'), ((926, 953), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (941, 953), True, 'import numpy as np\n'), ((2818, 2831), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2829, 2831), False, 'from collections import OrderedDict\n'), ((3118, 3138), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3128, 3138), False, 'from test_util import GenArgList\n'), ((2640, 2683), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (2681, 2683), True, 'import oneflow.experimental as flow\n'), ((866, 885), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (877, 885), True, 'import oneflow.experimental as flow\n'), ((982, 1001), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (993, 1001), True, 'import oneflow.experimental as flow\n'), ((1324, 1343), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1335, 1343), True, 'import oneflow.experimental as flow\n'), ((1468, 1487), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1479, 1487), True, 'import oneflow.experimental as flow\n'), ((1791, 1810), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1802, 1810), True, 'import oneflow.experimental as flow\n'), ((2125, 2144), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2136, 2144), True, 'import oneflow.experimental as flow\n'), ((2443, 2462), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2454, 2462), True, 'import oneflow.experimental as flow\n'), ((1241, 1260), 'numpy.array', 'np.array', (['[1, 1, 4]'], {}), '([1, 1, 4])\n', (1249, 1260), True, 'import numpy as np\n'), ((1385, 1404), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1393, 1404), True, 'import numpy as np\n')]
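A minimal standalone sketch of the comparison semantics the test file above exercises, assuming the same oneflow.experimental eager API (flow.enable_eager_execution() is how scripts of that era switch eager mode on before building tensors):

import numpy as np
import oneflow.experimental as flow

flow.enable_eager_execution()
a = flow.Tensor(np.array([1.0, 2.0, 3.0]), dtype=flow.float32)
b = flow.Tensor(np.array([2.0, 2.0, 2.0]), dtype=flow.float32)
out = flow.lt(a, b)    # elementwise a < b; the `a < b` operator form is equivalent
print(out.numpy())   # per the tests above, equal to np.less(a.numpy(), b.numpy())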
import random import oneflow as flow class PromptSpell(flow.nn.Module): def __init__(self, spell_length, hidden_size, spell_func): super(PromptSpell, self).__init__() self.spell_length = spell_length self.hidden_size = hidden_size self.spell_embeddings = flow.nn.Embedding(self.spell_length, self.hidden_size) self.spell_func = spell_func if self.spell_func == "lstm": self.lstm_head = flow.nn.LSTM(input_size=self.hidden_size, hidden_size=self.hidden_size, num_layers=2, # dropout=self.lstm_dropout, bidirectional=True, batch_first=True) # .to(torch.device("cuda")) self.mlp_head = flow.nn.Sequential(flow.nn.Linear(2 * self.hidden_size, self.hidden_size), flow.nn.ReLU(), flow.nn.Linear(self.hidden_size, self.hidden_size)) elif self.spell_func == "mlp": self.mlp_head = flow.nn.Sequential(flow.nn.Linear(self.hidden_size, self.hidden_size), flow.nn.ReLU(), flow.nn.Linear(self.hidden_size, self.hidden_size)) elif self.spell_func != "none": raise NotImplementedError("Prompt function " + self.spell_func) def init_embedding(self, word_embeddings=None, task_tokens=None): num_words = 5000 with flow.no_grad(): for i in range(self.spell_length): rand_token = random.randrange(num_words) if task_tokens is None: target_embedding = word_embeddings[rand_token] else: word_embedding = word_embeddings[rand_token] task_token = random.choice(task_tokens) task_embedding = word_embeddings[task_token] ratio = random.random() target_embedding = word_embedding * ratio + task_embedding * (1 - ratio) self.spell_embeddings.weight.data[i] = target_embedding def forward(self): prompt_embeds = self.spell_embeddings.weight.unsqueeze(0) if self.spell_func == "lstm": prompt_embeds = self.lstm_head(prompt_embeds)[0] if self.spell_func == "lstm" or self.spell_func == "mlp": prompt_embeds = self.mlp_head(prompt_embeds) return prompt_embeds
[ "oneflow.nn.ReLU", "oneflow.nn.Linear", "oneflow.nn.LSTM", "oneflow.nn.Embedding", "oneflow.no_grad" ]
[((293, 347), 'oneflow.nn.Embedding', 'flow.nn.Embedding', (['self.spell_length', 'self.hidden_size'], {}), '(self.spell_length, self.hidden_size)\n', (310, 347), True, 'import oneflow as flow\n'), ((452, 579), 'oneflow.nn.LSTM', 'flow.nn.LSTM', ([], {'input_size': 'self.hidden_size', 'hidden_size': 'self.hidden_size', 'num_layers': '(2)', 'bidirectional': '(True)', 'batch_first': '(True)'}), '(input_size=self.hidden_size, hidden_size=self.hidden_size,\n num_layers=2, bidirectional=True, batch_first=True)\n', (464, 579), True, 'import oneflow as flow\n'), ((1643, 1657), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (1655, 1657), True, 'import oneflow as flow\n'), ((896, 950), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(2 * self.hidden_size)', 'self.hidden_size'], {}), '(2 * self.hidden_size, self.hidden_size)\n', (910, 950), True, 'import oneflow as flow\n'), ((1000, 1014), 'oneflow.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (1012, 1014), True, 'import oneflow as flow\n'), ((1064, 1114), 'oneflow.nn.Linear', 'flow.nn.Linear', (['self.hidden_size', 'self.hidden_size'], {}), '(self.hidden_size, self.hidden_size)\n', (1078, 1114), True, 'import oneflow as flow\n'), ((1735, 1762), 'random.randrange', 'random.randrange', (['num_words'], {}), '(num_words)\n', (1751, 1762), False, 'import random\n'), ((1202, 1252), 'oneflow.nn.Linear', 'flow.nn.Linear', (['self.hidden_size', 'self.hidden_size'], {}), '(self.hidden_size, self.hidden_size)\n', (1216, 1252), True, 'import oneflow as flow\n'), ((1302, 1316), 'oneflow.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (1314, 1316), True, 'import oneflow as flow\n'), ((1366, 1416), 'oneflow.nn.Linear', 'flow.nn.Linear', (['self.hidden_size', 'self.hidden_size'], {}), '(self.hidden_size, self.hidden_size)\n', (1380, 1416), True, 'import oneflow as flow\n'), ((1990, 2016), 'random.choice', 'random.choice', (['task_tokens'], {}), '(task_tokens)\n', (2003, 2016), False, 'import random\n'), ((2110, 2125), 'random.random', 'random.random', ([], {}), '()\n', (2123, 2125), False, 'import random\n')]
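A hedged usage sketch for the PromptSpell module above. The sizes are illustrative, and flow.randn is assumed to be available in the eager OneFlow this module targets; note that init_embedding samples token ids below its hard-coded num_words = 5000, so the embedding table passed in must have at least 5000 rows:

import oneflow as flow

spell = PromptSpell(spell_length=4, hidden_size=8, spell_func="mlp")
word_embeddings = flow.randn(5000, 8)    # needs >= num_words rows
spell.init_embedding(word_embeddings)  # seeds the prompt from random word vectors
prompt = spell()                       # forward(); expected shape (1, spell_length, hidden_size)
print(prompt.shape)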
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import re import unittest import oneflow as flow import oneflow.unittest from oneflow.test_utils.automated_test_util import * class TestBiasAddError(flow.unittest.TestCase): def test_bias_add_dimension_match_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) bias = flow.ones((5,), dtype=flow.float32) out = flow._C.bias_add(x, bias, axis=1) test_case.assertTrue( "The size of tensor x (4,4) must match the size of tensor b (5,) at dimension 1" in str(ctx.exception) ) def test_bias_add_index_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) bias = flow.ones((5,), dtype=flow.float32) out = flow._C.bias_add(x, bias, axis=3) test_case.assertTrue( "Dimension out of range (expected to be in range of [-2,1], but got 3)" in str(ctx.exception) ) class TestCrossEntropyError(flow.unittest.TestCase): def test_cross_entropy_reduction_type_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) target = flow.ones((4, 4), dtype=flow.float32) out = flow._C.cross_entropy(x, target, None, 0, "just_test") test_case.assertTrue( "Reduction should be none, sum or mean." in str(ctx.exception) ) class TestCTCLossError(flow.unittest.TestCase): def test_ctcloss_reduction_type_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((5, 2, 3), dtype=flow.float32) targets = flow.tensor([[1, 2, 2], [1, 2, 2]], dtype=flow.int32) input_lengths = flow.tensor([5, 5], dtype=flow.int32) target_lengths = flow.tensor([3, 3], dtype=flow.int32) max_target_length = 0 if targets.ndim == 1: max_target_length = target_lengths.max().item() elif targets.ndim == 2: max_target_length = targets.shape[1] loss = flow._C.ctc_loss( x, targets, input_lengths, target_lengths, max_target_length, blank=0, zero_infinity=False, reduction="just_test", ) test_case.assertTrue( "Reduction should be none, sum or mean." in str(ctx.exception) ) class TestPadError(flow.unittest.TestCase): def test_pad_size_attribute_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1, 1), dtype=flow.float32) out = flow._C.pad(x, (1, 1, 1, 1, 1)) test_case.assertTrue( "Pad size should less than or equal to input axes * 2." in str(ctx.exception) ) def test_pad_size_mod2_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1, 1), dtype=flow.float32) out = flow._C.pad(x, (1, 1, 1,)) test_case.assertTrue( "Length of pad must be even but instead it equals 3" in str(ctx.exception) ) def test_reflect_pad_size_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1, 1, 2, 2), dtype=flow.float32) out = flow._C.pad(x, (4, 4, 4, 4), mode="reflect") test_case.assertTrue( "padding size should be less than the corresponding input dimension!" 
in str(ctx.exception) ) def test_pad_mode_error(test_case): with test_case.assertRaises(NotImplementedError) as ctx: x = flow.ones((1, 1, 2, 2), dtype=flow.float32) out = flow._C.pad(x, (4, 4, 4, 4), mode="test") test_case.assertTrue( "Pad mode is test, but only constant, reflect and replicate are valid." in str(ctx.exception) ) class TestFusedMLPError(flow.unittest.TestCase): def test_fuse_mlp_weight_size_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) bias = flow.ones((4,), dtype=flow.float32) out = flow._C.fused_mlp(x, [], [bias], False) test_case.assertTrue( "The number of weights should be greater equal than 1" in str(ctx.exception) ) def test_fuse_mlp_weight_bias_size_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) w1 = flow.ones((4, 4), dtype=flow.float32) w2 = flow.ones((4, 4), dtype=flow.float32) bias1 = flow.ones((4,), dtype=flow.float32) out = flow._C.fused_mlp(x, [w1, w2], [bias1], False) test_case.assertTrue( "The number of weights should be equal to biases" in str(ctx.exception) ) def test_fuse_mlp_weight_numaxes_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) w1 = flow.ones((4,), dtype=flow.float32) bias1 = flow.ones((4,), dtype=flow.float32) out = flow._C.fused_mlp(x, [w1,], [bias1,], False) test_case.assertTrue("Weight's dim size should == 2" in str(ctx.exception)) def test_fuse_mlp_bias_numaxes_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) w1 = flow.ones((4, 4), dtype=flow.float32) bias1 = flow.ones((4, 4), dtype=flow.float32) out = flow._C.fused_mlp(x, [w1,], [bias1,], False) test_case.assertTrue("Bias's dim size should == 1" in str(ctx.exception)) def test_fuse_mlp_bias_first_dim_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) w1 = flow.ones((6, 4), dtype=flow.float32) bias1 = flow.ones((5), dtype=flow.float32) out = flow._C.fused_mlp(x, [w1,], [bias1,], False) test_case.assertTrue( "Bias's dim is not equal to weight's first dim." in str(ctx.exception) ) def test_fuse_mlp_weight_second_dim_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((2, 4), dtype=flow.float32) w1 = flow.ones((3, 6), dtype=flow.float32) bias1 = flow.ones((3), dtype=flow.float32) out = flow._C.fused_mlp(x, [w1,], [bias1,], False) test_case.assertTrue( "weight's second dim should be equal to input's second dim." in str(ctx.exception) ) class TestL2NormalizeError(flow.unittest.TestCase): def test_l2normalize_axis_error1(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((3, 3), dtype=flow.float32) out = flow._C.normalize(x, dim=3, use_l2_norm_kernel=True) test_case.assertTrue("Axis should < 2 but axis is 3 now." in str(ctx.exception)) def test_l2normalize_axis_error2(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((3, 3), dtype=flow.float32) out = flow._C.normalize(x, dim=-3, use_l2_norm_kernel=True) test_case.assertTrue( "Axis should >=0 but axis is -1 now." in str(ctx.exception) ) class TestLossBaseFunctorError(flow.unittest.TestCase): def test_loss_base_reduction_type_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) target = flow.ones((4, 4), dtype=flow.float32) out = flow._C.mse_loss(x, target, "just_test") test_case.assertTrue( "Reduction should be none, sum or mean." 
in str(ctx.exception) ) class TestMatmulError(flow.unittest.TestCase): def test_matmul_dimension_error1(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4,), dtype=flow.float32) w = flow.ones((4, 4), dtype=flow.float32) out = flow._C.matmul(x, w, False, False, 1.0) test_case.assertTrue("Tensor a's dim should >= 2" in str(ctx.exception)) def test_matmul_dimension_error2(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 4), dtype=flow.float32) w = flow.ones((4,), dtype=flow.float32) out = flow._C.matmul(x, w, False, False, 1.0) test_case.assertTrue("Tensor b's dim should >= 2" in str(ctx.exception)) def test_matmul_dimension_error3(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((4, 1, 2, 1), dtype=flow.float32) w = flow.ones((4, 4, 4), dtype=flow.float32) out = flow._C.matmul(x, w, False, False, 1.0) test_case.assertTrue( "Not support number of dimensions of a being less than number of dimensions of b!" in str(ctx.exception) ) class TestPixelShuffleError(flow.unittest.TestCase): def test_pixel_shuffle_4D_input_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1, 8, 4, 4, 1), dtype=flow.float32) out = flow._C.pixel_shuffle(x, 2, 2) test_case.assertTrue("Only Accept 4D Tensor" in str(ctx.exception)) def test_pixel_shuffle_channel_divisble_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1, 8, 4, 4), dtype=flow.float32) out = flow._C.pixel_shuffle(x, 2, 3) test_case.assertTrue( "The channels of input tensor must be divisible by (upscale_factor * upscale_factor) or (h_upscale_factor * w_upscale_factor)" in str(ctx.exception) ) class TestTripletMarginLossError(flow.unittest.TestCase): def test_triplet_margin_loss_reduce_type_error(test_case): with test_case.assertRaises(Exception) as ctx: anchor = flow.ones((3, 3), dtype=flow.float32) positive = flow.ones((3, 3), dtype=flow.float32) negative = flow.ones((3, 3), dtype=flow.float32) triplet_loss = flow._C.triplet_margin_loss( anchor, positive, negative, margin=0.001, p=2, eps=1e-5, swap=False, reduction="just_test", ) test_case.assertTrue( "Reduction should be none, sum or mean." in str(ctx.exception) ) class TestNormalError(flow.unittest.TestCase): def test_normal_data_type_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow._C.normal(mean=0.0, std=1.0, size=(3, 3), dtype=flow.int32) test_case.assertTrue( "Only support float and double in normal()." 
in str(ctx.exception) ) def test_normal_out_tensor_data_type_error(test_case): with test_case.assertRaises(RuntimeError) as ctx: out = flow.zeros((3, 3), dtype=flow.float64) x = flow._C.normal( mean=0.0, std=1.0, size=(3, 3), dtype=flow.float32, out=out ) test_case.assertTrue( "data type oneflow.float32 does not match data type of out parameter oneflow.float64" in str(ctx.exception) ) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_normal_out_tensor_device_type_error(test_case): with test_case.assertRaises(RuntimeError) as ctx: out = flow.zeros((3, 3), dtype=flow.float32, device="cuda") x = flow._C.normal( mean=0.0, std=1.0, size=(3, 3), dtype=flow.float32, out=out, device="cpu", ) test_case.assertTrue( "does not match device type of out parameter" in str(ctx.exception) ) class TestNormalizationError(flow.unittest.TestCase): def test_normalization_moving_mean_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1, 4, 2, 2), dtype=flow.float32) moving_mean = flow.ones((4,), dtype=flow.float32) weight = flow.ones((4,), dtype=flow.float32) bias = flow.ones((4,), dtype=flow.float32) out = flow._C.normalization( x, moving_mean, None, weight, bias, 1, 1e-5, 0.9, False ) test_case.assertTrue( "Both moving_mean and moving_variance should be None or Tensor." in str(ctx.exception) ) def test_normalization_x_input_axes_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1,), dtype=flow.float32) weight = flow.ones((4,), dtype=flow.float32) bias = flow.ones((4,), dtype=flow.float32) out = flow._C.normalization( x, None, None, weight, bias, 1, 1e-5, 0.9, False ) test_case.assertTrue( "NumAxes of x should be greater or equal than 2." in str(ctx.exception) ) def test_normalization_eval_need_moving_statistic_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((1, 2,), dtype=flow.float32) weight = flow.ones((2,), dtype=flow.float32) bias = flow.ones((2,), dtype=flow.float32) out = flow._C.normalization( x, None, None, weight, bias, 1, 1e-5, 0.9, False ) test_case.assertTrue( "Must have moving_mean and moving_variance in eval mode." in str(ctx.exception) ) class TestOnehotError(flow.unittest.TestCase): def test_onehot_error(test_case): with test_case.assertRaises(Exception) as ctx: x = flow.ones((3, 3), dtype=flow.float32) out = flow._C.one_hot(x, 3, 0.9, 0) test_case.assertTrue( "one_hot is only applicable to index tensor." in str(ctx.exception) ) if __name__ == "__main__": unittest.main()
[ "oneflow._C.pad", "oneflow._C.normalize", "oneflow._C.normal", "oneflow._C.bias_add", "oneflow._C.matmul", "oneflow._C.one_hot", "oneflow._C.triplet_margin_loss", "oneflow.zeros", "oneflow._C.fused_mlp", "oneflow._C.ctc_loss", "oneflow._C.pixel_shuffle", "oneflow.tensor", "oneflow._C.mse_loss", "oneflow._C.normalization", "oneflow._C.cross_entropy", "oneflow.ones" ]
[((14950, 14965), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14963, 14965), False, 'import unittest\n'), ((895, 932), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (904, 932), True, 'import oneflow as flow\n'), ((952, 987), 'oneflow.ones', 'flow.ones', (['(5,)'], {'dtype': 'flow.float32'}), '((5,), dtype=flow.float32)\n', (961, 987), True, 'import oneflow as flow\n'), ((1006, 1039), 'oneflow._C.bias_add', 'flow._C.bias_add', (['x', 'bias'], {'axis': '(1)'}), '(x, bias, axis=1)\n', (1022, 1039), True, 'import oneflow as flow\n'), ((1326, 1363), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (1335, 1363), True, 'import oneflow as flow\n'), ((1383, 1418), 'oneflow.ones', 'flow.ones', (['(5,)'], {'dtype': 'flow.float32'}), '((5,), dtype=flow.float32)\n', (1392, 1418), True, 'import oneflow as flow\n'), ((1437, 1470), 'oneflow._C.bias_add', 'flow._C.bias_add', (['x', 'bias'], {'axis': '(3)'}), '(x, bias, axis=3)\n', (1453, 1470), True, 'import oneflow as flow\n'), ((1816, 1853), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (1825, 1853), True, 'import oneflow as flow\n'), ((1875, 1912), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (1884, 1912), True, 'import oneflow as flow\n'), ((1931, 1985), 'oneflow._C.cross_entropy', 'flow._C.cross_entropy', (['x', 'target', 'None', '(0)', '"""just_test"""'], {}), "(x, target, None, 0, 'just_test')\n", (1952, 1985), True, 'import oneflow as flow\n'), ((2277, 2317), 'oneflow.ones', 'flow.ones', (['(5, 2, 3)'], {'dtype': 'flow.float32'}), '((5, 2, 3), dtype=flow.float32)\n', (2286, 2317), True, 'import oneflow as flow\n'), ((2340, 2393), 'oneflow.tensor', 'flow.tensor', (['[[1, 2, 2], [1, 2, 2]]'], {'dtype': 'flow.int32'}), '([[1, 2, 2], [1, 2, 2]], dtype=flow.int32)\n', (2351, 2393), True, 'import oneflow as flow\n'), ((2422, 2459), 'oneflow.tensor', 'flow.tensor', (['[5, 5]'], {'dtype': 'flow.int32'}), '([5, 5], dtype=flow.int32)\n', (2433, 2459), True, 'import oneflow as flow\n'), ((2489, 2526), 'oneflow.tensor', 'flow.tensor', (['[3, 3]'], {'dtype': 'flow.int32'}), '([3, 3], dtype=flow.int32)\n', (2500, 2526), True, 'import oneflow as flow\n'), ((2767, 2902), 'oneflow._C.ctc_loss', 'flow._C.ctc_loss', (['x', 'targets', 'input_lengths', 'target_lengths', 'max_target_length'], {'blank': '(0)', 'zero_infinity': '(False)', 'reduction': '"""just_test"""'}), "(x, targets, input_lengths, target_lengths,\n max_target_length, blank=0, zero_infinity=False, reduction='just_test')\n", (2783, 2902), True, 'import oneflow as flow\n'), ((3324, 3361), 'oneflow.ones', 'flow.ones', (['(1, 1)'], {'dtype': 'flow.float32'}), '((1, 1), dtype=flow.float32)\n', (3333, 3361), True, 'import oneflow as flow\n'), ((3380, 3411), 'oneflow._C.pad', 'flow._C.pad', (['x', '(1, 1, 1, 1, 1)'], {}), '(x, (1, 1, 1, 1, 1))\n', (3391, 3411), True, 'import oneflow as flow\n'), ((3671, 3708), 'oneflow.ones', 'flow.ones', (['(1, 1)'], {'dtype': 'flow.float32'}), '((1, 1), dtype=flow.float32)\n', (3680, 3708), True, 'import oneflow as flow\n'), ((3727, 3752), 'oneflow._C.pad', 'flow._C.pad', (['x', '(1, 1, 1)'], {}), '(x, (1, 1, 1))\n', (3738, 3752), True, 'import oneflow as flow\n'), ((4002, 4045), 'oneflow.ones', 'flow.ones', (['(1, 1, 2, 2)'], {'dtype': 'flow.float32'}), '((1, 1, 2, 2), dtype=flow.float32)\n', (4011, 4045), True, 'import oneflow as 
flow\n'), ((4064, 4108), 'oneflow._C.pad', 'flow._C.pad', (['x', '(4, 4, 4, 4)'], {'mode': '"""reflect"""'}), "(x, (4, 4, 4, 4), mode='reflect')\n", (4075, 4108), True, 'import oneflow as flow\n'), ((4388, 4431), 'oneflow.ones', 'flow.ones', (['(1, 1, 2, 2)'], {'dtype': 'flow.float32'}), '((1, 1, 2, 2), dtype=flow.float32)\n', (4397, 4431), True, 'import oneflow as flow\n'), ((4450, 4491), 'oneflow._C.pad', 'flow._C.pad', (['x', '(4, 4, 4, 4)'], {'mode': '"""test"""'}), "(x, (4, 4, 4, 4), mode='test')\n", (4461, 4491), True, 'import oneflow as flow\n'), ((4825, 4862), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (4834, 4862), True, 'import oneflow as flow\n'), ((4882, 4917), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (4891, 4917), True, 'import oneflow as flow\n'), ((4936, 4975), 'oneflow._C.fused_mlp', 'flow._C.fused_mlp', (['x', '[]', '[bias]', '(False)'], {}), '(x, [], [bias], False)\n', (4953, 4975), True, 'import oneflow as flow\n'), ((5235, 5272), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (5244, 5272), True, 'import oneflow as flow\n'), ((5290, 5327), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (5299, 5327), True, 'import oneflow as flow\n'), ((5345, 5382), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (5354, 5382), True, 'import oneflow as flow\n'), ((5403, 5438), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (5412, 5438), True, 'import oneflow as flow\n'), ((5457, 5503), 'oneflow._C.fused_mlp', 'flow._C.fused_mlp', (['x', '[w1, w2]', '[bias1]', '(False)'], {}), '(x, [w1, w2], [bias1], False)\n', (5474, 5503), True, 'import oneflow as flow\n'), ((5756, 5793), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (5765, 5793), True, 'import oneflow as flow\n'), ((5811, 5846), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (5820, 5846), True, 'import oneflow as flow\n'), ((5867, 5902), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (5876, 5902), True, 'import oneflow as flow\n'), ((5921, 5963), 'oneflow._C.fused_mlp', 'flow._C.fused_mlp', (['x', '[w1]', '[bias1]', '(False)'], {}), '(x, [w1], [bias1], False)\n', (5938, 5963), True, 'import oneflow as flow\n'), ((6175, 6212), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (6184, 6212), True, 'import oneflow as flow\n'), ((6230, 6267), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (6239, 6267), True, 'import oneflow as flow\n'), ((6288, 6325), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (6297, 6325), True, 'import oneflow as flow\n'), ((6344, 6386), 'oneflow._C.fused_mlp', 'flow._C.fused_mlp', (['x', '[w1]', '[bias1]', '(False)'], {}), '(x, [w1], [bias1], False)\n', (6361, 6386), True, 'import oneflow as flow\n'), ((6598, 6635), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (6607, 6635), True, 'import oneflow as flow\n'), ((6653, 6690), 'oneflow.ones', 'flow.ones', (['(6, 4)'], {'dtype': 'flow.float32'}), '((6, 
4), dtype=flow.float32)\n', (6662, 6690), True, 'import oneflow as flow\n'), ((6711, 6743), 'oneflow.ones', 'flow.ones', (['(5)'], {'dtype': 'flow.float32'}), '(5, dtype=flow.float32)\n', (6720, 6743), True, 'import oneflow as flow\n'), ((6764, 6806), 'oneflow._C.fused_mlp', 'flow._C.fused_mlp', (['x', '[w1]', '[bias1]', '(False)'], {}), '(x, [w1], [bias1], False)\n', (6781, 6806), True, 'import oneflow as flow\n'), ((7063, 7100), 'oneflow.ones', 'flow.ones', (['(2, 4)'], {'dtype': 'flow.float32'}), '((2, 4), dtype=flow.float32)\n', (7072, 7100), True, 'import oneflow as flow\n'), ((7118, 7155), 'oneflow.ones', 'flow.ones', (['(3, 6)'], {'dtype': 'flow.float32'}), '((3, 6), dtype=flow.float32)\n', (7127, 7155), True, 'import oneflow as flow\n'), ((7176, 7208), 'oneflow.ones', 'flow.ones', (['(3)'], {'dtype': 'flow.float32'}), '(3, dtype=flow.float32)\n', (7185, 7208), True, 'import oneflow as flow\n'), ((7229, 7271), 'oneflow._C.fused_mlp', 'flow._C.fused_mlp', (['x', '[w1]', '[bias1]', '(False)'], {}), '(x, [w1], [bias1], False)\n', (7246, 7271), True, 'import oneflow as flow\n'), ((7596, 7633), 'oneflow.ones', 'flow.ones', (['(3, 3)'], {'dtype': 'flow.float32'}), '((3, 3), dtype=flow.float32)\n', (7605, 7633), True, 'import oneflow as flow\n'), ((7652, 7704), 'oneflow._C.normalize', 'flow._C.normalize', (['x'], {'dim': '(3)', 'use_l2_norm_kernel': '(True)'}), '(x, dim=3, use_l2_norm_kernel=True)\n', (7669, 7704), True, 'import oneflow as flow\n'), ((7915, 7952), 'oneflow.ones', 'flow.ones', (['(3, 3)'], {'dtype': 'flow.float32'}), '((3, 3), dtype=flow.float32)\n', (7924, 7952), True, 'import oneflow as flow\n'), ((7971, 8024), 'oneflow._C.normalize', 'flow._C.normalize', (['x'], {'dim': '(-3)', 'use_l2_norm_kernel': '(True)'}), '(x, dim=-3, use_l2_norm_kernel=True)\n', (7988, 8024), True, 'import oneflow as flow\n'), ((8322, 8359), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (8331, 8359), True, 'import oneflow as flow\n'), ((8381, 8418), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (8390, 8418), True, 'import oneflow as flow\n'), ((8437, 8477), 'oneflow._C.mse_loss', 'flow._C.mse_loss', (['x', 'target', '"""just_test"""'], {}), "(x, target, 'just_test')\n", (8453, 8477), True, 'import oneflow as flow\n'), ((8763, 8798), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (8772, 8798), True, 'import oneflow as flow\n'), ((8815, 8852), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (8824, 8852), True, 'import oneflow as flow\n'), ((8871, 8910), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'w', '(False)', '(False)', '(1.0)'], {}), '(x, w, False, False, 1.0)\n', (8885, 8910), True, 'import oneflow as flow\n'), ((9113, 9150), 'oneflow.ones', 'flow.ones', (['(4, 4)'], {'dtype': 'flow.float32'}), '((4, 4), dtype=flow.float32)\n', (9122, 9150), True, 'import oneflow as flow\n'), ((9167, 9202), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (9176, 9202), True, 'import oneflow as flow\n'), ((9221, 9260), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'w', '(False)', '(False)', '(1.0)'], {}), '(x, w, False, False, 1.0)\n', (9235, 9260), True, 'import oneflow as flow\n'), ((9463, 9506), 'oneflow.ones', 'flow.ones', (['(4, 1, 2, 1)'], {'dtype': 'flow.float32'}), '((4, 1, 2, 1), dtype=flow.float32)\n', (9472, 9506), True, 'import 
oneflow as flow\n'), ((9523, 9563), 'oneflow.ones', 'flow.ones', (['(4, 4, 4)'], {'dtype': 'flow.float32'}), '((4, 4, 4), dtype=flow.float32)\n', (9532, 9563), True, 'import oneflow as flow\n'), ((9582, 9621), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'w', '(False)', '(False)', '(1.0)'], {}), '(x, w, False, False, 1.0)\n', (9596, 9621), True, 'import oneflow as flow\n'), ((9972, 10018), 'oneflow.ones', 'flow.ones', (['(1, 8, 4, 4, 1)'], {'dtype': 'flow.float32'}), '((1, 8, 4, 4, 1), dtype=flow.float32)\n', (9981, 10018), True, 'import oneflow as flow\n'), ((10037, 10067), 'oneflow._C.pixel_shuffle', 'flow._C.pixel_shuffle', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (10058, 10067), True, 'import oneflow as flow\n'), ((10279, 10322), 'oneflow.ones', 'flow.ones', (['(1, 8, 4, 4)'], {'dtype': 'flow.float32'}), '((1, 8, 4, 4), dtype=flow.float32)\n', (10288, 10322), True, 'import oneflow as flow\n'), ((10341, 10371), 'oneflow._C.pixel_shuffle', 'flow._C.pixel_shuffle', (['x', '(2)', '(3)'], {}), '(x, 2, 3)\n', (10362, 10371), True, 'import oneflow as flow\n'), ((10785, 10822), 'oneflow.ones', 'flow.ones', (['(3, 3)'], {'dtype': 'flow.float32'}), '((3, 3), dtype=flow.float32)\n', (10794, 10822), True, 'import oneflow as flow\n'), ((10846, 10883), 'oneflow.ones', 'flow.ones', (['(3, 3)'], {'dtype': 'flow.float32'}), '((3, 3), dtype=flow.float32)\n', (10855, 10883), True, 'import oneflow as flow\n'), ((10907, 10944), 'oneflow.ones', 'flow.ones', (['(3, 3)'], {'dtype': 'flow.float32'}), '((3, 3), dtype=flow.float32)\n', (10916, 10944), True, 'import oneflow as flow\n'), ((10973, 11097), 'oneflow._C.triplet_margin_loss', 'flow._C.triplet_margin_loss', (['anchor', 'positive', 'negative'], {'margin': '(0.001)', 'p': '(2)', 'eps': '(1e-05)', 'swap': '(False)', 'reduction': '"""just_test"""'}), "(anchor, positive, negative, margin=0.001, p=2,\n eps=1e-05, swap=False, reduction='just_test')\n", (11000, 11097), True, 'import oneflow as flow\n'), ((11520, 11584), 'oneflow._C.normal', 'flow._C.normal', ([], {'mean': '(0.0)', 'std': '(1.0)', 'size': '(3, 3)', 'dtype': 'flow.int32'}), '(mean=0.0, std=1.0, size=(3, 3), dtype=flow.int32)\n', (11534, 11584), True, 'import oneflow as flow\n'), ((11841, 11879), 'oneflow.zeros', 'flow.zeros', (['(3, 3)'], {'dtype': 'flow.float64'}), '((3, 3), dtype=flow.float64)\n', (11851, 11879), True, 'import oneflow as flow\n'), ((11896, 11971), 'oneflow._C.normal', 'flow._C.normal', ([], {'mean': '(0.0)', 'std': '(1.0)', 'size': '(3, 3)', 'dtype': 'flow.float32', 'out': 'out'}), '(mean=0.0, std=1.0, size=(3, 3), dtype=flow.float32, out=out)\n', (11910, 11971), True, 'import oneflow as flow\n'), ((12393, 12446), 'oneflow.zeros', 'flow.zeros', (['(3, 3)'], {'dtype': 'flow.float32', 'device': '"""cuda"""'}), "((3, 3), dtype=flow.float32, device='cuda')\n", (12403, 12446), True, 'import oneflow as flow\n'), ((12463, 12556), 'oneflow._C.normal', 'flow._C.normal', ([], {'mean': '(0.0)', 'std': '(1.0)', 'size': '(3, 3)', 'dtype': 'flow.float32', 'out': 'out', 'device': '"""cpu"""'}), "(mean=0.0, std=1.0, size=(3, 3), dtype=flow.float32, out=out,\n device='cpu')\n", (12477, 12556), True, 'import oneflow as flow\n'), ((12969, 13012), 'oneflow.ones', 'flow.ones', (['(1, 4, 2, 2)'], {'dtype': 'flow.float32'}), '((1, 4, 2, 2), dtype=flow.float32)\n', (12978, 13012), True, 'import oneflow as flow\n'), ((13039, 13074), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (13048, 13074), True, 'import oneflow as flow\n'), ((13096, 13131), 
'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (13105, 13131), True, 'import oneflow as flow\n'), ((13151, 13186), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (13160, 13186), True, 'import oneflow as flow\n'), ((13206, 13285), 'oneflow._C.normalization', 'flow._C.normalization', (['x', 'moving_mean', 'None', 'weight', 'bias', '(1)', '(1e-05)', '(0.9)', '(False)'], {}), '(x, moving_mean, None, weight, bias, 1, 1e-05, 0.9, False)\n', (13227, 13285), True, 'import oneflow as flow\n'), ((13597, 13632), 'oneflow.ones', 'flow.ones', (['(1,)'], {'dtype': 'flow.float32'}), '((1,), dtype=flow.float32)\n', (13606, 13632), True, 'import oneflow as flow\n'), ((13654, 13689), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (13663, 13689), True, 'import oneflow as flow\n'), ((13709, 13744), 'oneflow.ones', 'flow.ones', (['(4,)'], {'dtype': 'flow.float32'}), '((4,), dtype=flow.float32)\n', (13718, 13744), True, 'import oneflow as flow\n'), ((13764, 13836), 'oneflow._C.normalization', 'flow._C.normalization', (['x', 'None', 'None', 'weight', 'bias', '(1)', '(1e-05)', '(0.9)', '(False)'], {}), '(x, None, None, weight, bias, 1, 1e-05, 0.9, False)\n', (13785, 13836), True, 'import oneflow as flow\n'), ((14135, 14172), 'oneflow.ones', 'flow.ones', (['(1, 2)'], {'dtype': 'flow.float32'}), '((1, 2), dtype=flow.float32)\n', (14144, 14172), True, 'import oneflow as flow\n'), ((14195, 14230), 'oneflow.ones', 'flow.ones', (['(2,)'], {'dtype': 'flow.float32'}), '((2,), dtype=flow.float32)\n', (14204, 14230), True, 'import oneflow as flow\n'), ((14250, 14285), 'oneflow.ones', 'flow.ones', (['(2,)'], {'dtype': 'flow.float32'}), '((2,), dtype=flow.float32)\n', (14259, 14285), True, 'import oneflow as flow\n'), ((14305, 14377), 'oneflow._C.normalization', 'flow._C.normalization', (['x', 'None', 'None', 'weight', 'bias', '(1)', '(1e-05)', '(0.9)', '(False)'], {}), '(x, None, None, weight, bias, 1, 1e-05, 0.9, False)\n', (14326, 14377), True, 'import oneflow as flow\n'), ((14710, 14747), 'oneflow.ones', 'flow.ones', (['(3, 3)'], {'dtype': 'flow.float32'}), '((3, 3), dtype=flow.float32)\n', (14719, 14747), True, 'import oneflow as flow\n'), ((14766, 14795), 'oneflow._C.one_hot', 'flow._C.one_hot', (['x', '(3)', '(0.9)', '(0)'], {}), '(x, 3, 0.9, 0)\n', (14781, 14795), True, 'import oneflow as flow\n')]
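Every test in the file above follows the same pattern: call a functional API with deliberately inconsistent arguments and assert on the raised message. A minimal version of that pattern outside unittest, reusing the exact bias_add case from the first test (flow._C is OneFlow's internal functional namespace, shown here only because the tests themselves use it):

import oneflow as flow

x = flow.ones((4, 4), dtype=flow.float32)
bias = flow.ones((5,), dtype=flow.float32)
try:
    flow._C.bias_add(x, bias, axis=1)  # bias has 5 elements but axis 1 of x has size 4
except Exception as e:
    print(e)  # the size-mismatch message asserted in the test above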
import os import oneflow.experimental as flow import argparse import numpy as np import time from utils.data_utils import load_image from utils.utils import to_numpy, to_tensor, save_images from models.networks import Generator def main(args): test_x, test_y = load_image(args.image_path) test_inp = to_tensor(test_x.astype(np.float32)) test_target = to_tensor(test_y.astype(np.float32)) generator = Generator().to("cuda") start_t = time.time() pretrain_model = flow.load(args.model_path) generator.load_state_dict(pretrain_model) end_t = time.time() print("load params time : {}".format(end_t - start_t)) start_t = time.time() generator.eval() with flow.no_grad(): gout = to_numpy(generator(test_inp), False) end_t = time.time() print("infer time : {}".format(end_t - start_t)) # save images save_images( gout, test_inp.numpy(), test_target.numpy(), path=os.path.join("./testimage.png"), plot_size=1, ) if __name__ == "__main__": flow.enable_eager_execution() parser = argparse.ArgumentParser(description="oneflow PIX2PIX") parser.add_argument("--model_path", type=str, required=True, help="model path") parser.add_argument( "--image_path", type=str, required=True, help="input image path" ) args = parser.parse_args() main(args)
[ "oneflow.experimental.no_grad", "oneflow.experimental.enable_eager_execution", "oneflow.experimental.load" ]
[((267, 294), 'utils.data_utils.load_image', 'load_image', (['args.image_path'], {}), '(args.image_path)\n', (277, 294), False, 'from utils.data_utils import load_image\n'), ((458, 469), 'time.time', 'time.time', ([], {}), '()\n', (467, 469), False, 'import time\n'), ((491, 517), 'oneflow.experimental.load', 'flow.load', (['args.model_path'], {}), '(args.model_path)\n', (500, 517), True, 'import oneflow.experimental as flow\n'), ((576, 587), 'time.time', 'time.time', ([], {}), '()\n', (585, 587), False, 'import time\n'), ((662, 673), 'time.time', 'time.time', ([], {}), '()\n', (671, 673), False, 'import time\n'), ((784, 795), 'time.time', 'time.time', ([], {}), '()\n', (793, 795), False, 'import time\n'), ((1060, 1089), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (1087, 1089), True, 'import oneflow.experimental as flow\n'), ((1103, 1157), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""oneflow PIX2PIX"""'}), "(description='oneflow PIX2PIX')\n", (1126, 1157), False, 'import argparse\n'), ((704, 718), 'oneflow.experimental.no_grad', 'flow.no_grad', ([], {}), '()\n', (716, 718), True, 'import oneflow.experimental as flow\n'), ((420, 431), 'models.networks.Generator', 'Generator', ([], {}), '()\n', (429, 431), False, 'from models.networks import Generator\n'), ((967, 998), 'os.path.join', 'os.path.join', (['"""./testimage.png"""'], {}), "('./testimage.png')\n", (979, 998), False, 'import os\n')]
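The inference script above is driven by argparse; a hedged sketch of calling it programmatically instead, by handing main() a pre-built namespace. Both paths are hypothetical placeholders and must point at a real saved generator checkpoint and a real input image:

import argparse
import oneflow.experimental as flow

flow.enable_eager_execution()
args = argparse.Namespace(
    model_path="path/to/generator_checkpoint",  # hypothetical checkpoint directory
    image_path="path/to/input.png",             # hypothetical input image
)
main(args)  # writes the comparison figure to ./testimage.png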
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow from oneflow.cuda.type_tensor import * from oneflow.cuda._utils import _get_device_index from typing import Union, Any def is_available() -> bool: r"""Returns a bool indicating if CUDA is currently available.""" # This function never throws and returns 0 if driver is missing or can't # be initialized return device_count() > 0 def device_count() -> int: r"""Returns the number of GPUs available.""" return flow._oneflow_internal.CudaGetDeviceCount() def current_device() -> int: r"""Returns local rank as device index.""" return flow._oneflow_internal.GetCudaDeviceIndex() def manual_seed_all(seed) -> None: r"""The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.cuda.manual_seed_all.html. Sets the seed for generating random numbers on all GPUs. It's safe to call this function if CUDA is not available; in that case, it is silently ignored. Args: seed (int): The desired seed. """ seed = int(seed) flow._oneflow_internal.ManualSeedAllCudaGenerator(seed) def manual_seed(seed: int) -> None: r"""The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.cuda.manual_seed.html. Sets the seed for generating random numbers for the current GPU. It's safe to call this function if CUDA is not available; in that case, it is silently ignored. Args: seed (int): The desired seed. .. warning:: If you are working with a multi-GPU model, this function is insufficient to get determinism. To seed all GPUs, use :func:`manual_seed_all`. """ seed = int(seed) idx = current_device() flow._oneflow_internal.manual_seed(seed, "cuda", idx) def set_device(device: Union[flow.device, str, int]) -> None: r"""The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.cuda.set_device.html. Sets the current device. Usage of this function is discouraged in favor of :attr:`device`. In most cases it's better to use ``CUDA_VISIBLE_DEVICES`` environmental variable. Args: device (flow.device or int): selected device. This function is a no-op if this argument is negative. """ if flow.env.get_world_size() > 0: raise ValueError("set_device() function is disabled in multi-device setting") device_idx = _get_device_index(device) if device_idx >= 0: flow._oneflow_internal.SetCudaDeviceIndex(device_idx) def synchronize(device: Union[flow.device, str, int, None] = None) -> None: r""" Waits for all kernels in all streams on a CUDA device to complete. Note: In the eager mode of oneflow, all operations will be converted into instructions executed in the virtual machine, so in order to comply with the semantics of synchronization, this function will call the `eager.Sync()` function before the device is synchronized, which may affect the operations executed in other devices. Args: device (flow.device or int, optional): device for which to synchronize. It uses the current device, given by :func:`~oneflow.cuda.current_device`, if :attr:`device` is ``None`` (default). 
""" device_idx = _get_device_index(device, optional=True) if device_idx >= 0: flow._oneflow_internal.eager.Sync() flow._oneflow_internal.CudaSynchronize(device_idx)
[ "oneflow._oneflow_internal.SetCudaDeviceIndex", "oneflow._oneflow_internal.manual_seed", "oneflow._oneflow_internal.eager.Sync", "oneflow.cuda._utils._get_device_index", "oneflow.env.get_world_size", "oneflow._oneflow_internal.GetCudaDeviceIndex", "oneflow._oneflow_internal.CudaGetDeviceCount", "oneflow._oneflow_internal.ManualSeedAllCudaGenerator", "oneflow._oneflow_internal.CudaSynchronize" ]
[((1050, 1093), 'oneflow._oneflow_internal.CudaGetDeviceCount', 'flow._oneflow_internal.CudaGetDeviceCount', ([], {}), '()\n', (1091, 1093), True, 'import oneflow as flow\n'), ((1183, 1226), 'oneflow._oneflow_internal.GetCudaDeviceIndex', 'flow._oneflow_internal.GetCudaDeviceIndex', ([], {}), '()\n', (1224, 1226), True, 'import oneflow as flow\n'), ((1639, 1694), 'oneflow._oneflow_internal.ManualSeedAllCudaGenerator', 'flow._oneflow_internal.ManualSeedAllCudaGenerator', (['seed'], {}), '(seed)\n', (1688, 1694), True, 'import oneflow as flow\n'), ((2314, 2367), 'oneflow._oneflow_internal.manual_seed', 'flow._oneflow_internal.manual_seed', (['seed', '"""cuda"""', 'idx'], {}), "(seed, 'cuda', idx)\n", (2348, 2367), True, 'import oneflow as flow\n'), ((3023, 3048), 'oneflow.cuda._utils._get_device_index', '_get_device_index', (['device'], {}), '(device)\n', (3040, 3048), False, 'from oneflow.cuda._utils import _get_device_index\n'), ((3931, 3971), 'oneflow.cuda._utils._get_device_index', '_get_device_index', (['device'], {'optional': '(True)'}), '(device, optional=True)\n', (3948, 3971), False, 'from oneflow.cuda._utils import _get_device_index\n'), ((2889, 2914), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (2912, 2914), True, 'import oneflow as flow\n'), ((3081, 3134), 'oneflow._oneflow_internal.SetCudaDeviceIndex', 'flow._oneflow_internal.SetCudaDeviceIndex', (['device_idx'], {}), '(device_idx)\n', (3122, 3134), True, 'import oneflow as flow\n'), ((4004, 4039), 'oneflow._oneflow_internal.eager.Sync', 'flow._oneflow_internal.eager.Sync', ([], {}), '()\n', (4037, 4039), True, 'import oneflow as flow\n'), ((4048, 4098), 'oneflow._oneflow_internal.CudaSynchronize', 'flow._oneflow_internal.CudaSynchronize', (['device_idx'], {}), '(device_idx)\n', (4086, 4098), True, 'import oneflow as flow\n')]
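A short usage sketch for the module above; it calls only helpers defined there, and the seeding functions are documented above as silently ignored when CUDA is absent:

import oneflow as flow

if flow.cuda.is_available():
    print("visible GPUs:", flow.cuda.device_count())
    flow.cuda.synchronize()        # block until queued kernels on the current device finish
flow.cuda.manual_seed_all(42)      # seed the generators of all GPUs
flow.cuda.manual_seed(42)          # seed only the current device's generator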
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest import oneflow as flow import oneflow.unittest from libai.inference.image_classification import ImageClassificationPipeline from libai.utils import distributed as dist from libai.utils.file_utils import get_data_from_cache IMAGE_URL = "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/LiBai/Inference/ILSVRC2012_val_00000293.JPEG" # noqa IMAGE_MD5 = "65ac8a72466e859cd3c6b279ed8e532a" class TestImageClassificationPipeline(flow.unittest.TestCase): def setUp(self) -> None: cache_dir = os.path.join( os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "inference_test_data" ) if dist.get_local_rank() == 0: # download dataset on main process of each node get_data_from_cache(IMAGE_URL, cache_dir, md5=IMAGE_MD5) self.image_path = os.path.join(cache_dir, IMAGE_URL.split("/")[-1]) @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases") @flow.unittest.skip_unless_1n4d() def test_pipeline_with_pipeline_parallel(self): self.pipeline = ImageClassificationPipeline("configs/vit_imagenet.py", 1, 1, 4) self.pipeline(self.image_path) @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases") @flow.unittest.skip_unless_1n4d() def test_pipeline_with_tensor_parallel(self): pass # TODO: bug occurs when tensor parallel # self.pipeline = ImageClassificationPipeline("configs/vit_imagenet.py", 1, 4, 1) # self.pipeline(self.image_path) if __name__ == "__main__": unittest.main()
[ "oneflow.unittest.skip_unless_1n4d", "oneflow.cuda.is_available" ]
[((1598, 1630), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (1628, 1630), True, 'import oneflow as flow\n'), ((1890, 1922), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (1920, 1922), True, 'import oneflow as flow\n'), ((2198, 2213), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2211, 2213), False, 'import unittest\n'), ((1707, 1770), 'libai.inference.image_classification.ImageClassificationPipeline', 'ImageClassificationPipeline', (['"""configs/vit_imagenet.py"""', '(1)', '(1)', '(4)'], {}), "('configs/vit_imagenet.py', 1, 1, 4)\n", (1734, 1770), False, 'from libai.inference.image_classification import ImageClassificationPipeline\n'), ((1190, 1240), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CACHE_DIR"""', '"""./data_test"""'], {}), "('ONEFLOW_TEST_CACHE_DIR', './data_test')\n", (1199, 1240), False, 'import os\n'), ((1285, 1306), 'libai.utils.distributed.get_local_rank', 'dist.get_local_rank', ([], {}), '()\n', (1304, 1306), True, 'from libai.utils import distributed as dist\n'), ((1385, 1441), 'libai.utils.file_utils.get_data_from_cache', 'get_data_from_cache', (['IMAGE_URL', 'cache_dir'], {'md5': 'IMAGE_MD5'}), '(IMAGE_URL, cache_dir, md5=IMAGE_MD5)\n', (1404, 1441), False, 'from libai.utils.file_utils import get_data_from_cache\n'), ((1544, 1568), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (1566, 1568), True, 'import oneflow as flow\n'), ((1836, 1860), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (1858, 1860), True, 'import oneflow as flow\n')]
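For reference, a hedged single-device variant of the pipeline call made in the 4-GPU test above. The three trailing arguments mirror the test's parallel sizes collapsed to 1, and whether the vit_imagenet config actually runs on one device depends on the LiBai installation; the image path is a hypothetical placeholder:

from libai.inference.image_classification import ImageClassificationPipeline

pipeline = ImageClassificationPipeline("configs/vit_imagenet.py", 1, 1, 1)
prediction = pipeline("path/to/image.jpg")  # hypothetical local image path
print(prediction)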
# !/usr/bin/env python # -*- coding:utf-8 -*- """ /** * Copyright 2020 Tianshu AI Platform. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================= */ Reference: - [Identity Mappings in Deep Residual Networks] (https://arxiv.org/abs/1603.05027) (CVPR 2016) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import oneflow as flow BLOCK_COUNTS = [3, 4, 6, 3] BLOCK_FILTERS = [256, 512, 1024, 2048] BLOCK_FILTERS_INNER = [64, 128, 256, 512] class ResnetBuilder(object): def __init__(self, weight_regularizer, trainable=True, training=True): self.weight_initializer = flow.variance_scaling_initializer( 2, 'fan_in', 'random_normal', data_format="NCHW") self.weight_regularizer = weight_regularizer self.trainable = trainable self.training = training def _conv2d( self, name, input, filters, kernel_size, strides=1, padding="SAME", data_format="NCHW", dilations=1, ): weight = flow.get_variable( name + "-weight", shape=(filters, input.shape[1], kernel_size, kernel_size), dtype=input.dtype, initializer=self.weight_initializer, regularizer=self.weight_regularizer, model_name="weight", trainable=self.trainable, ) return flow.nn.conv2d( input, weight, strides, padding, data_format, dilations, name=name) def _batch_norm(self, inputs, name=None, last=False): initializer = flow.zeros_initializer() if last else flow.ones_initializer() return flow.layers.batch_normalization( inputs=inputs, axis=1, momentum=0.9, # 97, epsilon=1e-5, center=True, scale=True, trainable=self.trainable, training=self.training, gamma_initializer=initializer, moving_variance_initializer=initializer, gamma_regularizer=self.weight_regularizer, beta_regularizer=self.weight_regularizer, name=name, ) def conv2d_affine( self, input, name, filters, kernel_size, strides, activation=None, last=False): # input data_format must be NCHW, cannot check now padding = "SAME" if strides > 1 or kernel_size > 1 else "VALID" output = self._conv2d( name, input, filters, kernel_size, strides, padding) output = self._batch_norm(output, name + "_bn", last=last) if activation == "Relu": output = flow.nn.relu(output) return output def bottleneck_transformation( self, input, block_name, filters, filters_inner, strides): a = self.conv2d_affine( input, block_name + "_branch2a", filters_inner, 1, 1, activation="Relu") b = self.conv2d_affine( a, block_name + "_branch2b", filters_inner, 3, strides, activation="Relu") c = self.conv2d_affine( b, block_name + "_branch2c", filters, 1, 1, last=True) return c def residual_block( self, input, block_name, filters, filters_inner, strides_init): if strides_init != 1 or block_name == "res2_0": shortcut = self.conv2d_affine( input, block_name + "_branch1", filters, 1, strides_init ) else: shortcut = input bottleneck = self.bottleneck_transformation( input, block_name, filters, filters_inner, strides_init, ) return flow.nn.relu(bottleneck + shortcut) def residual_stage( self, input, stage_name, counts, filters, 
filters_inner, stride_init=2): output = input for i in range(counts): block_name = "%s_%d" % (stage_name, i) output = self.residual_block( output, block_name, filters, filters_inner, stride_init if i == 0 else 1) return output def resnet_conv_x_body(self, input): output = input for i, (counts, filters, filters_inner) in enumerate( zip(BLOCK_COUNTS, BLOCK_FILTERS, BLOCK_FILTERS_INNER) ): stage_name = "res%d" % (i + 2) output = self.residual_stage( output, stage_name, counts, filters, filters_inner, 1 if i == 0 else 2) return output def resnet_stem(self, input): conv1 = self._conv2d("conv1", input, 64, 7, 2) conv1_bn = flow.nn.relu(self._batch_norm(conv1, "conv1_bn")) pool1 = flow.nn.max_pool2d( conv1_bn, ksize=3, strides=2, padding="SAME", data_format="NCHW", name="pool1", ) return pool1 def resnet50( images, trainable=True, need_transpose=False, training=True, wd=1.0 / 32768): weight_regularizer = flow.regularizers.l2( wd) if wd > 0.0 and wd < 1.0 else None builder = ResnetBuilder(weight_regularizer, trainable, training) # note: images.shape = (N C H W) in cc's new dataloader, transpose is not # needed anymore if need_transpose: images = flow.transpose(images, name="transpose", perm=[0, 3, 1, 2]) with flow.deprecated.variable_scope("Resnet"): stem = builder.resnet_stem(images) body = builder.resnet_conv_x_body(stem) pool5 = flow.nn.avg_pool2d( body, ksize=7, strides=1, padding="VALID", data_format="NCHW", name="pool5", ) fc1001 = flow.layers.dense( flow.reshape(pool5, (pool5.shape[0], -1)), units=1000, use_bias=True, kernel_initializer=flow.variance_scaling_initializer(2, 'fan_in', 'random_normal'), bias_initializer=flow.zeros_initializer(), kernel_regularizer=weight_regularizer, bias_regularizer=weight_regularizer, trainable=trainable, name="fc1001", ) return fc1001
[ "oneflow.nn.relu", "oneflow.transpose", "oneflow.nn.avg_pool2d", "oneflow.zeros_initializer", "oneflow.variance_scaling_initializer", "oneflow.nn.max_pool2d", "oneflow.ones_initializer", "oneflow.reshape", "oneflow.layers.batch_normalization", "oneflow.deprecated.variable_scope", "oneflow.nn.conv2d", "oneflow.regularizers.l2", "oneflow.get_variable" ]
[((1232, 1320), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', '"""random_normal"""'], {'data_format': '"""NCHW"""'}), "(2, 'fan_in', 'random_normal', data_format\n ='NCHW')\n", (1265, 1320), True, 'import oneflow as flow\n'), ((1665, 1913), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': '(filters, input.shape[1], kernel_size, kernel_size)', 'dtype': 'input.dtype', 'initializer': 'self.weight_initializer', 'regularizer': 'self.weight_regularizer', 'model_name': '"""weight"""', 'trainable': 'self.trainable'}), "(name + '-weight', shape=(filters, input.shape[1],\n kernel_size, kernel_size), dtype=input.dtype, initializer=self.\n weight_initializer, regularizer=self.weight_regularizer, model_name=\n 'weight', trainable=self.trainable)\n", (1682, 1913), True, 'import oneflow as flow\n'), ((2010, 2096), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'data_format', 'dilations'], {'name': 'name'}), '(input, weight, strides, padding, data_format, dilations,\n name=name)\n', (2024, 2096), True, 'import oneflow as flow\n'), ((2336, 2679), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': '(1)', 'momentum': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': 'self.trainable', 'training': 'self.training', 'gamma_initializer': 'initializer', 'moving_variance_initializer': 'initializer', 'gamma_regularizer': 'self.weight_regularizer', 'beta_regularizer': 'self.weight_regularizer', 'name': 'name'}), '(inputs=inputs, axis=1, momentum=0.9,\n epsilon=1e-05, center=True, scale=True, trainable=self.trainable,\n training=self.training, gamma_initializer=initializer,\n moving_variance_initializer=initializer, gamma_regularizer=self.\n weight_regularizer, beta_regularizer=self.weight_regularizer, name=name)\n', (2367, 2679), True, 'import oneflow as flow\n'), ((4765, 4800), 'oneflow.nn.relu', 'flow.nn.relu', (['(bottleneck + shortcut)'], {}), '(bottleneck + shortcut)\n', (4777, 4800), True, 'import oneflow as flow\n'), ((5963, 6065), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv1_bn'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': '"""pool1"""'}), "(conv1_bn, ksize=3, strides=2, padding='SAME',\n data_format='NCHW', name='pool1')\n", (5981, 6065), True, 'import oneflow as flow\n'), ((6325, 6349), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['wd'], {}), '(wd)\n', (6345, 6349), True, 'import oneflow as flow\n'), ((6602, 6661), 'oneflow.transpose', 'flow.transpose', (['images'], {'name': '"""transpose"""', 'perm': '[0, 3, 1, 2]'}), "(images, name='transpose', perm=[0, 3, 1, 2])\n", (6616, 6661), True, 'import oneflow as flow\n'), ((6672, 6712), 'oneflow.deprecated.variable_scope', 'flow.deprecated.variable_scope', (['"""Resnet"""'], {}), "('Resnet')\n", (6702, 6712), True, 'import oneflow as flow\n'), ((6821, 6921), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['body'], {'ksize': '(7)', 'strides': '(1)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool5"""'}), "(body, ksize=7, strides=1, padding='VALID', data_format=\n 'NCHW', name='pool5')\n", (6839, 6921), True, 'import oneflow as flow\n'), ((2259, 2283), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2281, 2283), True, 'import oneflow as flow\n'), ((2297, 2320), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', 
(2318, 2320), True, 'import oneflow as flow\n'), ((3444, 3464), 'oneflow.nn.relu', 'flow.nn.relu', (['output'], {}), '(output)\n', (3456, 3464), True, 'import oneflow as flow\n'), ((7048, 7089), 'oneflow.reshape', 'flow.reshape', (['pool5', '(pool5.shape[0], -1)'], {}), '(pool5, (pool5.shape[0], -1))\n', (7060, 7089), True, 'import oneflow as flow\n'), ((7173, 7236), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', '"""random_normal"""'], {}), "(2, 'fan_in', 'random_normal')\n", (7206, 7236), True, 'import oneflow as flow\n'), ((7267, 7291), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (7289, 7291), True, 'import oneflow as flow\n')]
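Each extract_api entry above follows one layout: the character span of the call inside the record's code string, the fully qualified API name, the call text as written in the source, the parsed positional and keyword arguments, the raw argument string, the span of that argument string, a flag, and the import statement the call resolves through. A quick way to sanity-check the spans is to slice the code string at the recorded offset; below is a minimal sketch, assuming each record is loaded as a Python dict with "code" and "extract_api" keys (the field names and the helper itself are assumptions of this example, not part of the dump):

# Hypothetical validator: the first span of every entry starts where the
# callee text begins, so slicing `code` at that offset must reproduce the
# callee verbatim.
def check_extract_offsets(record):
    code = record["code"]
    for entry in record["extract_api"]:
        (start, _end), _dotted_name, call_src = entry[0], entry[1], entry[2]
        if code[start:start + len(call_src)] != call_src:
            # Report what was actually found at the recorded offset.
            print("mismatch at", start, ":", repr(code[start:start + 20]))
            return False
    return True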
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict from random import shuffle import numpy as np from oneflow.test_utils.automated_test_util import * from oneflow.test_utils.test_util import GenArgList import oneflow.unittest import oneflow as flow @flow.unittest.skip_unless_1n1d() class TestContiguous(flow.unittest.TestCase): @autotest(n=10, check_graph=True) def test_transpose_with_random_data(test_case): device = random_device() x = random_tensor(ndim=4).to(device) y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int)) z = y.contiguous() return y @autotest(n=10, check_graph=True) def test_permute2d_tensor_with_random_data(test_case): device = random_device() ndim = 2 permute_list = [0, 1] shuffle(permute_list) x = random_tensor( ndim=ndim, dim0=random(1, 32).to(int), dim1=random(1, 59).to(int), ).to(device) y = x.permute(permute_list) z = y.contiguous() return z @autotest(n=10, check_graph=True) def test_permute3d_tensor_with_random_data(test_case): device = random_device() ndim = 3 permute_list = [0, 1, 2] shuffle(permute_list) x = random_tensor( ndim=ndim, dim0=random(1, 32).to(int), dim1=random(1, 59).to(int), dim2=random(1, 65).to(int), ).to(device) y = x.permute(permute_list) z = y.contiguous() return z @autotest(n=10, check_graph=True) def test_permute4d_tensor_with_random_data(test_case): device = random_device() ndim = 4 permute_list = [0, 1, 2, 3] shuffle(permute_list) x = random_tensor( ndim=ndim, dim0=random(1, 32).to(int), dim1=random(1, 59).to(int), dim2=random(1, 65).to(int), dim3=random(1, 127).to(int), ).to(device) y = x.permute(permute_list) z = y.contiguous() return z if __name__ == "__main__": unittest.main()
[ "oneflow.unittest.skip_unless_1n1d" ]
[((846, 878), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (876, 878), True, 'import oneflow as flow\n'), ((2678, 2693), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2691, 2693), False, 'import unittest\n'), ((1408, 1429), 'random.shuffle', 'shuffle', (['permute_list'], {}), '(permute_list)\n', (1415, 1429), False, 'from random import shuffle\n'), ((1826, 1847), 'random.shuffle', 'shuffle', (['permute_list'], {}), '(permute_list)\n', (1833, 1847), False, 'from random import shuffle\n'), ((2311, 2332), 'random.shuffle', 'shuffle', (['permute_list'], {}), '(permute_list)\n', (2318, 2332), False, 'from random import shuffle\n')]
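The autotest cases above drive contiguous() through randomized transpose and permute calls, checking OneFlow against PyTorch on both the eager and graph paths. Outside the harness the semantics are easy to observe directly; a minimal sketch, assuming a recent OneFlow build in which Tensor.is_contiguous() and view-style permute are available:

import numpy as np
import oneflow as flow

x = flow.arange(24).reshape(2, 3, 4)  # contiguous by construction
y = x.permute(2, 0, 1)                # same storage, rewritten strides
z = y.contiguous()                    # materializes the permuted layout

print(x.is_contiguous())  # True
print(y.is_contiguous())  # False: permute alone does not move data
print(z.is_contiguous())  # True: contiguous() copied into dense memory
assert np.array_equal(y.numpy(), z.numpy())  # identical values either way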
import oneflow as flow import oneflow.nn as nn from typing import Union, List, Dict, Any, cast from collections import namedtuple __all__ = [ "vgg16", "vgg16_bn", "vgg19_bn", "vgg19", ] # 31 is the raw depth of vgg16, while 37 is the raw depth of vgg19 slice_pos = {31: [4, 9, 16, 23], 37: [4, 9, 18, 27]} class VGG_WITH_FEATURES(flow.nn.Module): def __init__(self, vgg_pretrained_features, requires_grad): super(VGG_WITH_FEATURES, self).__init__() self.slice1 = flow.nn.Sequential() self.slice2 = flow.nn.Sequential() self.slice3 = flow.nn.Sequential() self.slice4 = flow.nn.Sequential() pos = slice_pos[len(vgg_pretrained_features)] for x in range(pos[0]): self.slice1.add_module(str(x), vgg_pretrained_features[x]) for x in range(pos[0], pos[1]): self.slice2.add_module(str(x), vgg_pretrained_features[x]) for x in range(pos[1], pos[2]): self.slice3.add_module(str(x), vgg_pretrained_features[x]) for x in range(pos[2], pos[3]): self.slice4.add_module(str(x), vgg_pretrained_features[x]) if not requires_grad: for param in self.parameters(): param.requires_grad = False def forward(self, X): h = self.slice1(X) h_relu1_2 = h h = self.slice2(h) h_relu2_2 = h h = self.slice3(h) h_relu3_3 = h h = self.slice4(h) h_relu4_3 = h vgg_outputs = namedtuple( "VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3"] ) out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3) return out class VGG(nn.Module): def __init__( self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True ) -> None: super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def forward(self, x: flow.Tensor) -> flow.Tensor: x = self.features(x) x = self.avgpool(x) x = flow.flatten(x, 1) x = self.classifier(x) return x def _initialize_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential: layers: List[nn.Module] = [] in_channels = 3 for v in cfg: if v == "M": layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: v = cast(int, v) conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) cfgs: Dict[str, List[Union[str, int]]] = { "A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], "B": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], "D": [ 64, 64, "M", 128, 128, "M", 256, 256, 256, "M", 512, 512, 512, "M", 512, 512, 512, "M", ], "E": [ 64, 64, "M", 128, 128, "M", 256, 256, 256, 256, "M", 512, 512, 512, 512, "M", 512, 512, 512, 512, "M", ], } def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained, model_path) -> VGG: model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm)) if pretrained: model.load_state_dict(flow.load(model_path), strict=False) return model def vgg16( pretrained=False, model_path="/" ) -> VGG: r"""VGG 16-layer 
model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. The required minimum input size of the model is 32x32. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ return _vgg("vgg16", "D", False, pretrained, model_path) def vgg16_bn() -> VGG: r"""VGG 16-layer model (configuration "D") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. The required minimum input size of the model is 32x32. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ return _vgg("vgg16_bn", "D", True) def vgg19( pretrained=False, model_path="pre_model/" ) -> VGG: r"""VGG 19-layer model (configuration "E") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. The required minimum input size of the model is 32x32. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ return _vgg("vgg19", "E", False, pretrained, model_path) def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 19-layer model (configuration 'E') with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. The required minimum input size of the model is 32x32. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ return _vgg("vgg19_bn", "E", True)
[ "oneflow.nn.init.constant_", "oneflow.nn.ReLU", "oneflow.nn.Linear", "oneflow.flatten", "oneflow.nn.Conv2d", "oneflow.nn.BatchNorm2d", "oneflow.nn.init.kaiming_normal_", "oneflow.load", "oneflow.nn.Sequential", "oneflow.nn.MaxPool2d", "oneflow.nn.AdaptiveAvgPool2d", "oneflow.nn.Dropout", "oneflow.nn.init.normal_" ]
[((3634, 3656), 'oneflow.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3647, 3656), True, 'import oneflow.nn as nn\n'), ((503, 523), 'oneflow.nn.Sequential', 'flow.nn.Sequential', ([], {}), '()\n', (521, 523), True, 'import oneflow as flow\n'), ((546, 566), 'oneflow.nn.Sequential', 'flow.nn.Sequential', ([], {}), '()\n', (564, 566), True, 'import oneflow as flow\n'), ((589, 609), 'oneflow.nn.Sequential', 'flow.nn.Sequential', ([], {}), '()\n', (607, 609), True, 'import oneflow as flow\n'), ((632, 652), 'oneflow.nn.Sequential', 'flow.nn.Sequential', ([], {}), '()\n', (650, 652), True, 'import oneflow as flow\n'), ((1506, 1576), 'collections.namedtuple', 'namedtuple', (['"""VggOutputs"""', "['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3']"], {}), "('VggOutputs', ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])\n", (1516, 1576), False, 'from collections import namedtuple\n'), ((1923, 1951), 'oneflow.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(7, 7)'], {}), '((7, 7))\n', (1943, 1951), True, 'import oneflow.nn as nn\n'), ((2416, 2434), 'oneflow.flatten', 'flow.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2428, 2434), True, 'import oneflow as flow\n'), ((2005, 2033), 'oneflow.nn.Linear', 'nn.Linear', (['(512 * 7 * 7)', '(4096)'], {}), '(512 * 7 * 7, 4096)\n', (2014, 2033), True, 'import oneflow.nn as nn\n'), ((2047, 2060), 'oneflow.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2054, 2060), True, 'import oneflow.nn as nn\n'), ((2074, 2086), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (2084, 2086), True, 'import oneflow.nn as nn\n'), ((2100, 2121), 'oneflow.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (2109, 2121), True, 'import oneflow.nn as nn\n'), ((2135, 2148), 'oneflow.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2142, 2148), True, 'import oneflow.nn as nn\n'), ((2162, 2174), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (2172, 2174), True, 'import oneflow.nn as nn\n'), ((2188, 2216), 'oneflow.nn.Linear', 'nn.Linear', (['(4096)', 'num_classes'], {}), '(4096, num_classes)\n', (2197, 2216), True, 'import oneflow.nn as nn\n'), ((3329, 3341), 'typing.cast', 'cast', (['int', 'v'], {}), '(int, v)\n', (3333, 3341), False, 'from typing import Union, List, Dict, Any, cast\n'), ((3363, 3414), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, v, kernel_size=3, padding=1)\n', (3372, 3414), True, 'import oneflow.nn as nn\n'), ((4596, 4617), 'oneflow.load', 'flow.load', (['model_path'], {}), '(model_path)\n', (4605, 4617), True, 'import oneflow as flow\n'), ((2617, 2687), 'oneflow.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (2640, 2687), True, 'import oneflow.nn as nn\n'), ((3260, 3297), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (3272, 3297), True, 'import oneflow.nn as nn\n'), ((2747, 2775), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (2764, 2775), True, 'import oneflow.nn as nn\n'), ((2840, 2870), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (2857, 2870), True, 'import oneflow.nn as nn\n'), ((2887, 2915), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (2904, 2915), True, 'import oneflow.nn as nn\n'), ((3477, 3494), 
'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (3491, 3494), True, 'import oneflow.nn as nn\n'), ((3496, 3517), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3503, 3517), True, 'import oneflow.nn as nn\n'), ((3572, 3593), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3579, 3593), True, 'import oneflow.nn as nn\n'), ((2975, 3009), 'oneflow.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (2990, 3009), True, 'import oneflow.nn as nn\n'), ((3026, 3054), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3043, 3054), True, 'import oneflow.nn as nn\n')]
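For completeness, the builders defined in this record can be smoke-tested with random input. A minimal sketch, assuming pretrained=False so that no checkpoint is ever read from the model_path default:

import oneflow as flow

model = vgg16(pretrained=False)  # configuration "D", no batch norm
model.eval()                     # disable Dropout for a deterministic forward pass

x = flow.randn(1, 3, 224, 224)   # any input >= 32x32 works: AdaptiveAvgPool2d fixes the 7x7 grid
with flow.no_grad():
    logits = model(x)
print(logits.shape)              # (1, 1000)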