
The "No gradients provided for any variable" error occurs when optimizing the position of the antenna with Sionna RT #7


Description

@JackYifan

@jhoydis Hello, I would like to ask: how can I optimize the antenna position using Sionna RT?

# Set some environment variables
import os
gpu_num = 0 # GPU to be used. Use "" to use the CPU
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Suppress some TF warnings
os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu_num}"
import sionna

# Configure GPU
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except RuntimeError as e:
        print(e)

# Avoid warnings from TensorFlow
import warnings
tf.get_logger().setLevel('ERROR')
warnings.filterwarnings('ignore')
# Other imports
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sionna.rt import load_scene, PlanarArray, Transmitter, Receiver, Camera

# Fix the seed for reproducible results
tf.random.set_seed(1)
# Load the scene
scene = load_scene(sionna.rt.scene.box)

# Set the scattering coefficient of the radio material in the scene
# to a non-zero value to enable scattering
for mat in scene.radio_materials.values():
    mat.scattering_coefficient = 1./tf.sqrt(3.)

# Configure the transmit array
scene.tx_array = PlanarArray(num_rows=1, num_cols=1,
                             vertical_spacing=0.5,
                             horizontal_spacing=0.5,
                             pattern="iso",
                             polarization="V",
                             polarization_model=2)

# Configure the receive array (used to compute the coverage map)
scene.rx_array = PlanarArray(num_rows=1, num_cols=1,
                             vertical_spacing=0.5,
                             horizontal_spacing=0.5,
                             pattern="iso",
                             polarization="V",
                             polarization_model=2)

# Create a transmitter and add it to the scene
tx = Transmitter("tx", position=[0.0,0.0,2.5],
                 orientation=[0.0,0.0,0.0])
tx.position = tf.Variable([0.0,0.0,2.5], dtype=tf.float32, trainable=True)     # Trainable position
tx.orientation = tf.Variable([0.0,0.0,0.0], dtype=tf.float32, trainable=False) # Fixed orientation
scene.add(tx)
cm_cell_size = np.array([0.1,0.1]) # Each cell is 0.1m x 0.1m
# Rectangle defining the target area
target_center = np.array([-2.5,-2.5,1.5]) # Center
target_size = np.array([5.,5.]) # Size
target_orientation = np.array([0.,0.,0.]) # Orientation: parallel to XY
# Configure the optimizer (RMSprop with learning rate 0.1)
optimizer = tf.keras.optimizers.RMSprop(0.1)
# optimizer = tf.keras.optimizers.Adam()
# Number of training steps
num_steps = 10
scene.transmitters['tx'].position = tf.Variable([0.0,0.0,2.5],trainable=True)
scene.transmitters['tx'].orientation = tf.Variable([0.0,0.0,0.0],trainable=False)
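# Note: these two assignments re-create the trainable position (and the fixed
# orientation) through the scene handle; they duplicate the variables already
# attached to `tx` before scene.add(tx)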
def train_step():
    """A single training step"""
    with tf.GradientTape() as tape:
        tape.watch(scene.transmitters['tx'].position)
        # Compute coverage of the target area
        target_cm = scene.coverage_map(cm_center=target_center,
                                       cm_orientation=target_orientation, 
                                       cm_size=target_size, # Target area 
                                       cm_cell_size=cm_cell_size,
                                       diffraction=True, scattering=True, # Enable diffraction and scattering in addition to reflection and LoS
                                       check_scene=False) # Don't check the scene prior to compute to speed things up
        # The loss function is a rate in bit
        # We fix an arbitrary scaling factor corresponding to the transmit-to-noise power ratio
        # The scaling has a direct impact on the gradient magnitudes
        scaling = 1e6
        rate = tf.reduce_mean(tf.math.log(1. + target_cm.as_tensor()*scaling))/tf.math.log(2.)
        loss = -rate
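        # Dividing the natural log by log(2) gives log2, so `rate` is the average
        # of log2(1 + scaling * path_gain) over the cells of the target coverage
        # map, and minimizing loss = -rate maximizes this average rate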
    
    # Compute gradients and apply through the optimizer
    # print(tape.watched_variables())
    grads = tape.gradient(loss, tape.watched_variables())
    print(grads)
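    # If every entry of `grads` is None, the apply_gradients call below raises
    # the "No gradients provided for any variable" error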
    optimizer.apply_gradients(zip(grads, tape.watched_variables()))
    return rate

for step in range(num_steps):       
    rate = train_step()
    print(f"Training step {step} - Rate: {rate.numpy():.2E} bit - tx position: {scene.transmitters['tx'].position.numpy()}", end='\n')

I tried this, but I get the error "No gradients provided for any variable":

No gradients provided for any variable: (['Variable:0'],). Provided `grads_and_vars` is ((None, <tf.Variable 'Variable:0' shape=(3,) dtype=float32, numpy=array([0. , 0. , 2.5], dtype=float32)>),).
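For reference, here is a minimal check (reusing the scene, target area, and scaling defined above) that queries the gradient for the position variable directly; it comes back as None, which is what makes optimizer.apply_gradients() raise the error:

# Minimal gradient check, reusing the scene and target area configured above
scaling = 1e6
with tf.GradientTape() as tape:
    tape.watch(scene.transmitters['tx'].position)
    target_cm = scene.coverage_map(cm_center=target_center,
                                   cm_orientation=target_orientation,
                                   cm_size=target_size,
                                   cm_cell_size=cm_cell_size,
                                   diffraction=True, scattering=True,
                                   check_scene=False)
    rate = tf.reduce_mean(tf.math.log(1. + target_cm.as_tensor()*scaling))/tf.math.log(2.)
    loss = -rate

# Gradient with respect to the position variable only: this prints None, i.e.
# no gradient is propagated back to the transmitter position
print(tape.gradient(loss, scene.transmitters['tx'].position))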
