afnio.optim.optimizer

afnio.optim.optimizer.StateDict = Dict[str, Any] module-attribute

Type alias for the state dictionary used in optimizers. This dictionary contains the state of the optimizer, including Parameter states and Parameter groups.

afnio.optim.optimizer.Optimizer

Base class for all optimizers.

Warning

Parameters need to be specified as collections that have a deterministic ordering that is consistent between runs. Examples of objects that don't satisfy those properties are sets and iterators over values of dictionaries.
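
For example, a list preserves its order across runs while a set does not (a sketch; MyOptimizer stands in for any concrete Optimizer subclass, and p1, p2, defaults are placeholders):

>>> optimizer = MyOptimizer([p1, p2], defaults)  # OK: lists have deterministic order
>>> optimizer = MyOptimizer({p1, p2}, defaults)  # avoid: set iteration order varies between runs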

Source code in afnio/optim/optimizer.py
class Optimizer:
    """Base class for all optimizers.

    Warning:
        Parameters need to be specified as collections that have a deterministic
        ordering that is consistent between runs. Examples of objects that don't
        satisfy those properties are sets and iterators over values of dictionaries.
    """

    defaults: Dict[str, Any] = {}
    state: DefaultDict[Variable, Any] = defaultdict(dict)
    param_groups: List[Dict[str, Any]] = []
    optimizer_id: Optional[str]

    def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None:
        """Initialize the optimizer.

        Args:
            params (iterable): An iterable of [`Variable`][afnio.Variable]s
                or [`dict`][dict]s. Specifies what Variables should be optimized.
            defaults: A dict containing default values of optimization options
                (used when a parameter group doesn't specify them).
        """
        # Websocket attributes
        self.optimizer_id = None
        # Internal attributes
        self.defaults = {}
        self.state = defaultdict(dict)
        self.param_groups = []

        # Determine which child class is instantiating this Optimizer
        child_class = self.__class__.__name__

        try:
            # Get the singleton websocket client
            _, ws_client = get_default_clients()

            params = list(params)
            payload = {
                "optimizer_name": child_class,
                "params": _serialize_arg(params),
                "defaults": _serialize_arg(defaults),
            }
            response = run_in_background_loop(
                ws_client.call("create_optimizer", payload)
            )
            if "error" in response:
                raise RuntimeError(
                    response["error"]["data"].get("exception", response["error"])
                )

            logger.debug(f"Optimizer created and shared with the server: {self!r}")

            result = response.get("result", {})
            optimizer_id = result.get("optimizer_id")
            defaults = result.get("defaults")
            state = result.get("state")
            param_groups = result.get("param_groups")

            if not optimizer_id:
                raise RuntimeError(
                    f"Server did not return an optimizer_id "
                    f"for payload: {payload!r}, response: {response!r}"
                )
            self.optimizer_id = optimizer_id
            self.defaults = _deserialize_output(defaults)
            self.state = _deserialize_output(state)
            self.param_groups = _deserialize_output(param_groups)
            register_optimizer(self)
        except Exception as e:
            logger.error(f"Failed to share Optimizer with the server: {e}")
            raise

    def __getstate__(self) -> Dict[str, Any]:
        return {
            "defaults": self.defaults,
            "state": self.state,
            "param_groups": self.param_groups,
        }

    def __setstate__(self, state: Dict[str, Any]) -> None:
        self.__dict__.update(state)
        self._patch_step_function()  # To support multiprocessing pickle/unpickle
        self.defaults.setdefault("differentiable", False)

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + " ("
        for i, group in enumerate(self.param_groups):
            format_string += "\n"
            format_string += f"Parameter Group {i}\n"
            for key in sorted(group.keys()):
                if key != "params":
                    format_string += f"    {key}: {group[key]}\n"
        format_string += ")"
        return format_string

    def _patch_step_function(self) -> None:
        self._clear_grad_profile_name = (
            f"Optimizer.clear_grad#{self.__class__.__name__}.clear_grad"
        )

    def state_dict(self) -> StateDict:
        """Returns the state of the optimizer as a [`dict`][dict].

        It contains two entries:

        * `state`: a Dict holding current optimization state. Its content
            differs between optimizer classes, but some common characteristics
            hold. For example, state is saved per parameter, and the parameter
            itself is NOT saved. `state` is a Dictionary mapping parameter ids
            to a Dict with state corresponding to each parameter.
        * `param_groups`: a List containing all parameter groups where each
            parameter group is a Dict. Each parameter group contains metadata
            specific to the optimizer, such as learning rate and momentum,
            as well as a List of parameter IDs of the parameters in the group.

        NOTE: The parameter IDs may look like indices but they are just IDs
        associating state with param_group. When loading from a state_dict,
        the optimizer will zip the param_group `params` (int IDs) and the
        optimizer `param_groups` (actual `cog.Parameter` objects) in order to
        match state WITHOUT additional verification.

        Returns:
            A dictionary containing the optimizer state.

        Examples:
            A returned state dict might look something like:

            ```text
            {
                'state': {
                    0: {
                        'momentum_buffer': [
                            (
                                Parameter(data='You are...', role='system prompt', requires_grad=True),
                                [Variable(data='The system prompt should...', role='gradient for system prompt')]
                            )
                        ]
                    },
                    1: {
                        'momentum_buffer': [
                            (
                                Parameter(data='Answer this...', role='instruction prompt', requires_grad=True),
                                [Variable(data='The instruction prompt must...', role='gradient for instruction prompt')]
                            )
                        ]
                    }
                },
                'param_groups': [
                    {
                        'model_client': {'class_type': 'AsyncOpenAI'},
                        'messages': [
                            {
                                'role': 'system',
                                'content': [Variable(data='You are part of an optimization system...', role='optimizer system prompt', requires_grad=False)]
                            },
                            {
                                'role': 'user',
                                'content': [Variable(data='Here is the variable you need...', role='optimizer user prompt', requires_grad=False)]
                            }
                        ],
                        'inputs': {},
                        'constraints': [],
                        'momentum': 2,
                        'completion_args': {'model': 'gpt-4o'},
                        'params': [0, 1]
                    }
                ]
            }
            ```
        """  # noqa: E501

        # Save order indices instead of Variables
        param_mappings: Dict[int, int] = {}
        start_index = 0

        def pack_group(group: Dict[str, Any]) -> Dict[str, Any]:
            nonlocal start_index
            packed = {k: v for k, v in group.items() if k != "params"}

            # Custom serialization for model clients
            for key, value in packed.items():
                if isinstance(value, BaseModel):
                    packed[key] = deepcopy(value)  # Trigger custom __deepcopy__

            param_mappings.update(
                {
                    id(p): i
                    for i, p in enumerate(group["params"], start_index)
                    if id(p) not in param_mappings
                }
            )
            packed["params"] = [param_mappings[id(p)] for p in group["params"]]
            start_index += len(packed["params"])
            return packed

        param_groups = [pack_group(g) for g in self.param_groups]
        # Remap state to use order indices as keys
        packed_state = {
            (param_mappings[id(k)] if isinstance(k, Variable) else k): v
            for k, v in self.state.items()
        }

        state_dict = {
            "state": packed_state,
            "param_groups": param_groups,
        }

        return state_dict

    @staticmethod
    def _process_value_according_to_param_policy(
        param: Variable,
        value: Variable,
        param_id: int,
        param_groups: List[Dict[Any, Any]],
        key: Hashable = None,
    ) -> Variable:
        assert param_groups is not None
        if key == "step":
            return value
        else:
            if param.is_floating_point():
                return value.to(dtype=float)
            else:
                return value

    def load_state_dict(
        self, state_dict: StateDict, model_clients: Dict[str, BaseModel] = None
    ) -> None:
        """Loads the optimizer state.

        Args:
            state_dict: Optimizer state. Should be an object returned
                from a call to [`state_dict`][..state_dict].
            model_clients: A dictionary mapping model client keys
                (e.g., `'fw_model_client'`) to their respective instances of
                [`BaseModel`][afnio.models.model.BaseModel]. These instances
                will be used to reconstruct any model clients referenced within
                the optimizer state. If a required model client is missing, an
                error will be raised with instructions on how to provide the
                missing client.

        Raises:
            ValueError: If the provided `state_dict` is invalid, such as when the
                parameter groups or their sizes do not match the current optimizer
                configuration.
            ValueError: If a required model client is missing from the
                `model_clients` dictionary, with details about the expected
                model client type and key.

        Examples:
            >>> openai_client = AsyncOpenAI()
            >>> optimizer.load_state_dict(saved_state_dict, model_clients={
            ...     'model_client': openai_client,
            ... })
        """
        # shallow copy, to be consistent with module API
        state_dict = state_dict.copy()

        # Validate the state_dict
        groups = self.param_groups

        # Deepcopy as we write into saved_groups later to update state
        saved_groups = deepcopy(state_dict["param_groups"])

        if len(groups) != len(saved_groups):
            raise ValueError(
                "Loaded state dict has a different number of parameter groups."
            )
        param_lens = (len(g["params"]) for g in groups)
        saved_lens = (len(g["params"]) for g in saved_groups)
        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
            raise ValueError(
                "Loaded state dict contains a parameter group "
                "that doesn't match the size of optimizer's group."
            )

        # Update the state
        id_map = dict(
            zip(
                chain.from_iterable(g["params"] for g in saved_groups),
                chain.from_iterable(g["params"] for g in groups),
            )
        )

        def _cast(param, value, param_id=None, param_groups=None, key=None):
            r"""Make a deep copy of value, casting all variables to device of param."""
            if isinstance(value, Variable):
                return Optimizer._process_value_according_to_param_policy(
                    param, value, param_id, param_groups, key
                )
            elif isinstance(value, dict):
                return {
                    k: _cast(
                        param, v, param_id=param_id, param_groups=param_groups, key=k
                    )
                    for k, v in value.items()
                }
            elif isinstance(value, Iterable):
                return type(value)(
                    _cast(param, v, param_id=param_id, param_groups=param_groups)
                    for v in value
                )
            else:
                return value

        # Copy state assigned to params (and cast variables to appropriate types).
        # State that is not assigned to params is copied as is (needed for
        # backward compatibility).
        state: DefaultDict[Variable, Dict[Any, Any]] = defaultdict(dict)
        for k, v in state_dict["state"].items():
            if k in id_map:
                param = id_map[k]
                state[param] = _cast(
                    param, v, param_id=k, param_groups=state_dict["param_groups"]
                )
            else:
                state[k] = v

        # Update parameter groups, setting their 'params' value
        def update_group(
            group: Dict[str, Any], new_group: Dict[str, Any]
        ) -> Dict[str, Any]:
            new_group["params"] = group["params"]
            return new_group

        # Reconstruct model clients if needed
        model_clients = model_clients or {}
        for group in saved_groups:
            for key, value in group.items():
                if isinstance(value, dict) and "class_type" in value:
                    cls_name = value["class_type"]
                    cls = MODEL_REGISTRY.get(cls_name)
                    if cls and issubclass(cls, BaseModel):
                        if key in model_clients:
                            # Use the provided model client instance
                            group[key] = model_clients[key]

                            # Add usage metadata to new model client instance
                            usage = value.get("usage", {})
                            group[key].update_usage(usage)
                        else:
                            raise ValueError(
                                f"Missing model client for '{key}' of expected type "
                                f"'{cls_name}'. Please provide an instance of "
                                f"'{cls_name}' using the `model_clients` input "
                                f"dictionary and retry "
                                f"the `load_state_dict()` operation."
                            )

        param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
        self.__setstate__({"state": state, "param_groups": param_groups})

    def _on_clear_grad(self):
        """
        Notify the server that all gradients for this optimizer are being cleared.

        This method sends a 'clear_grad' RPC request to the server with the optimizer's
        ID. It waits for the server to acknowledge the request and checks that the
        response matches the optimizer's ID. If the server confirms, the method returns
        normally; otherwise, it raises an error. This ensures that the server and client
        remain synchronized regarding the clearing of gradients.

        Raises:
            RuntimeError: If the server response does not match the optimizer ID or if
                the notification fails for any reason.
        """
        payload = {
            "optimizer_id": self.optimizer_id,
        }

        try:
            _, ws_client = get_default_clients()
            response = run_in_background_loop(ws_client.call("clear_grad", payload))
            if "error" in response:
                raise RuntimeError(
                    response["error"]["data"].get("exception", response["error"])
                )

            # Check server response
            result_message = response.get("result", {}).get("message")
            if result_message != "Gradients cleared successfully.":
                raise RuntimeError(
                    f"Server response mismatch: (received {response['result']!r}, "
                    f"but expected optimizer_id={self.optimizer_id!r})"
                )
            logger.debug(
                f"Gradient clearing notified to server and confirmed: "
                f"optimizer_id={self.optimizer_id!r}"
            )

        except Exception as e:
            logger.exception(f"Failed to notify server of gradient clearing: {e}")
            raise

    def clear_grad(self) -> None:
        """Resets the gradients of all optimized [`Variable`][afnio.Variable]s
        by setting the [`grad`][afnio.Variable.grad] attribute of each parameter
        to an empty list.
        """
        self._on_clear_grad()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad:
                    p.grad.clear()

    @overload
    def step(self, closure: None = ...) -> None: ...

    @overload
    def step(
        self, closure: Callable[[], Tuple[Variable, Variable]]
    ) -> Tuple[Variable, Variable]: ...

    def step(
        self, closure: Optional[Callable[[], Tuple[Variable, Variable]]] = None
    ) -> Optional[Tuple[Variable, Variable]]:
        """Performs a single optimization step (parameter update).

        Args:
            closure (Callable, optional): A closure that reevaluates the agent and
                returns the loss as a tuple containing a numerical score and a textual
                explanation. This closure is optional for most optimizers.

        Note:
            Unless otherwise specified, this function should not modify the
            [`grad`][afnio.Variable.grad] field of the parameters.

        Note:
            Some optimization algorithms need to reevaluate the function multiple times,
            so you have to pass in a `closure` that allows them to recompute your agent.
            The closure should clear the gradients, compute the loss, and return it.

        Examples:
            Example of using a closure as described in the note above:
            >>> for input, target in dataset:
            ...     def closure():
            ...         optimizer.clear_grad()
            ...         output = agent(input)
            ...         loss = loss_fn(output, target)
            ...         loss.backward()
            ...         return loss
            ...
            ...     optimizer.step(closure)
        """
        # Set `_pending_data` for all parameters that will be optimized
        for group in self.param_groups:
            for p in group["params"]:
                p._pending_data = True
                logger.debug(
                    f"Marked variable {p.variable_id!r} as pending for data update."
                )

        try:
            # Get the singleton websocket client
            _, ws_client = get_default_clients()

            payload = {
                "optimizer_id": self.optimizer_id,
            }
            response = run_in_background_loop(ws_client.call("run_step", payload))
            if "error" in response:
                raise RuntimeError(
                    response["error"]["data"].get("exception", response["error"])
                )

            logger.debug(
                f"Optimization instantiated and shared with the server: "
                f"optimizer_id={self.optimizer_id!r}"
            )

            result = response.get("result", {})
            result_message = result.get("message")
            result_state = result.get("state", [])

            # Extract all variable_ids from the result_state
            # and wait for them to be registered in VARIABLE_REGISTRY
            all_var_ids = self._extract_variable_ids_from_state(result_state)
            for var_id in all_var_ids:
                _wait_for_variable(var_id)

            des_result_state = _deserialize_state(result_state)

            # Convert [param, grads] lists to (param, grads) tuples
            for state in des_result_state.values():
                if "momentum_buffer" in state:
                    state["momentum_buffer"] = [
                        (
                            tuple(pair)
                            if isinstance(pair, list) and len(pair) == 2
                            else pair
                        )
                        for pair in state["momentum_buffer"]
                    ]

            if result_message != "Optimizer step executed successfully.":
                raise RuntimeError(
                    f"Server did not return any data for optimization operation: "
                    f"payload={payload!r}, response={response!r}"
                )

            self.state = des_result_state

            logger.debug(
                f"Optimization executed successfully: "
                f"optimizer_id={self.optimizer_id!r}"
            )

        except Exception as e:
            logger.error(f"Failed to run optimization on the server: {e}")

            # Clear all pending data flags to avoid deadlocks
            for group in self.param_groups:
                for p in group["params"]:
                    p._pending_data = False
                    logger.debug(
                        f"Marked variable {p.variable_id!r} as not pending for data "
                        f"update after error."
                    )

            raise

    def add_param_group(self, param_group: Dict[str, Any]) -> None:
        """Add a param group to the [`Optimizer`][..]'s `param_groups`.

        This can be useful when fine-tuning a pre-trained network, as frozen layers can
        be made trainable and added to the [`Optimizer`][..] as training progresses.

        Args:
            param_group: Specifies what Variables should be optimized along with
                group specific optimization options.
        """
        try:
            # Get the singleton websocket client
            _, ws_client = get_default_clients()

            messages = self.defaults.get("messages", [])
            payload = {
                "optimizer_id": self.optimizer_id,
                "messages": _serialize_arg(messages),
                "param_group": _serialize_arg(param_group),
            }
            response = run_in_background_loop(
                ws_client.call("add_param_group", payload)
            )
            if "error" in response:
                raise RuntimeError(
                    response["error"]["data"].get("exception", response["error"])
                )

            logger.debug(
                f"Param group added and shared with the server: {param_group!r}"
            )

            result = response.get("result", {})
            param_group = result.get("param_group")

            if not param_group:
                raise RuntimeError(
                    f"Server did not return a param_group "
                    f"for payload: {payload!r}, response: {response!r}"
                )
            self.param_groups.append(_deserialize_output(param_group))
        except Exception as e:
            logger.error(f"Failed to add param group to the optimizer: {e}")
            raise

    def _extract_variable_ids_from_state(self, state):
        raise NotImplementedError

__init__(params, defaults)

Initialize the optimizer.

Parameters:

Name Type Description Default
params iterable

An iterable of Variables or dicts. Specifies what Variables should be optimized.

required
defaults dict[str, Any]

A dict containing default values of optimization options (used when a parameter group doesn't specify them).

required
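
For illustration, params can be a flat iterable of parameters or an iterable of per-group dicts, each dict overriding selected defaults (a sketch; MyOptimizer, p1, p2, and the momentum option are placeholders):

>>> optimizer = MyOptimizer([p1, p2], defaults={"momentum": 2})
>>> optimizer = MyOptimizer(
...     [{"params": [p1], "momentum": 3}, {"params": [p2]}],
...     defaults={"momentum": 2},
... )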
Source code in afnio/optim/optimizer.py
def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None:
    """Initialize the optimizer.

    Args:
        params (iterable): An iterable of [`Variable`][afnio.Variable]s
            or [`dict`][dict]s. Specifies what Variables should be optimized.
        defaults: A dict containing default values of optimization options
            (used when a parameter group doesn't specify them).
    """
    # Websocket attributes
    self.optimizer_id = None
    # Internal attributes
    self.defaults = {}
    self.state = defaultdict(dict)
    self.param_groups = []

    # Determine which child class is instantiating this Optimizer
    child_class = self.__class__.__name__

    try:
        # Get the singleton websocket client
        _, ws_client = get_default_clients()

        params = list(params)
        payload = {
            "optimizer_name": child_class,
            "params": _serialize_arg(params),
            "defaults": _serialize_arg(defaults),
        }
        response = run_in_background_loop(
            ws_client.call("create_optimizer", payload)
        )
        if "error" in response:
            raise RuntimeError(
                response["error"]["data"].get("exception", response["error"])
            )

        logger.debug(f"Optimizer created and shared with the server: {self!r}")

        result = response.get("result", {})
        optimizer_id = result.get("optimizer_id")
        defaults = result.get("defaults")
        state = result.get("state")
        param_groups = result.get("param_groups")

        if not optimizer_id:
            raise RuntimeError(
                f"Server did not return an optimizer_id "
                f"for payload: {payload!r}, response: {response!r}"
            )
        self.optimizer_id = optimizer_id
        self.defaults = _deserialize_output(defaults)
        self.state = _deserialize_output(state)
        self.param_groups = _deserialize_output(param_groups)
        register_optimizer(self)
    except Exception as e:
        logger.error(f"Failed to share Optimizer with the server: {e}")
        raise

state_dict()

Returns the state of the optimizer as a dict.

It contains two entries:

  • state: a Dict holding current optimization state. Its content differs between optimizer classes, but some common characteristics hold. For example, state is saved per parameter, and the parameter itself is NOT saved. state is a Dictionary mapping parameter ids to a Dict with state corresponding to each parameter.
  • param_groups: a List containing all parameter groups where each parameter group is a Dict. Each parameter group contains metadata specific to the optimizer, such as learning rate and momentum, as well as a List of parameter IDs of the parameters in the group.

NOTE: The parameter IDs may look like indices but they are just IDs associating state with param_group. When loading from a state_dict, the optimizer will zip the param_group params (int IDs) and the optimizer param_groups (actual cog.Parameter objects) in order to match state WITHOUT additional verification.

Returns:

Type Description
StateDict

A dictionary containing the optimizer state.

Examples:

A returned state dict might look something like:

{
    'state': {
        0: {
            'momentum_buffer': [
                (
                    Parameter(data='You are...', role='system prompt', requires_grad=True),
                    [Variable(data='The system prompt should...', role='gradient for system prompt')]
                )
            ]
        },
        1: {
            'momentum_buffer': [
                (
                    Parameter(data='Answer this...', role='instruction prompt', requires_grad=True),
                    [Variable(data='The instruction prompt must...', role='gradient for instruction prompt')]
                )
            ]
        }
    },
    'param_groups': [
        {
            'model_client': {'class_type': 'AsyncOpenAI'},
            'messages': [
                {
                    'role': 'system',
                    'content': [Variable(data='You are part of an optimization system...', role='optimizer system prompt', requires_grad=False)]
                },
                {
                    'role': 'user',
                    'content': [Variable(data='Here is the variable you need...', role='optimizer user prompt', requires_grad=False)]
                }
            ],
            'inputs': {},
            'constraints': [],
            'momentum': 2,
            'completion_args': {'model': 'gpt-4o'},
            'params': [0, 1]
        }
    ]
}
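
The integer IDs listed under each group's params key into the state mapping, which a quick check confirms (a sketch over the example above):

>>> sd = optimizer.state_dict()
>>> list(sd.keys())
['state', 'param_groups']
>>> sd['param_groups'][0]['params']  # integer IDs, not the parameters themselves
[0, 1]
>>> sorted(sd['state'].keys())       # per-parameter state is keyed by the same IDs
[0, 1]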
Source code in afnio/optim/optimizer.py
def state_dict(self) -> StateDict:
    """Returns the state of the optimizer as a [`dict`][dict].

    It contains two entries:

    * `state`: a Dict holding current optimization state. Its content
        differs between optimizer classes, but some common characteristics
        hold. For example, state is saved per parameter, and the parameter
        itself is NOT saved. `state` is a Dictionary mapping parameter ids
        to a Dict with state corresponding to each parameter.
    * `param_groups`: a List containing all parameter groups where each
        parameter group is a Dict. Each parameter group contains metadata
        specific to the optimizer, such as learning rate and momentum,
        as well as a List of parameter IDs of the parameters in the group.

    NOTE: The parameter IDs may look like indices but they are just IDs
    associating state with param_group. When loading from a state_dict,
    the optimizer will zip the param_group `params` (int IDs) and the
    optimizer `param_groups` (actual `cog.Parameter` objects) in order to
    match state WITHOUT additional verification.

    Returns:
        A dictionary containing the optimizer state.

    Examples:
        A returned state dict might look something like:

        ```text
        {
            'state': {
                0: {
                    'momentum_buffer': [
                        (
                            Parameter(data='You are...', role='system prompt', requires_grad=True),
                            [Variable(data='The system prompt should...', role='gradient for system prompt')]
                        )
                    ]
                },
                1: {
                    'momentum_buffer': [
                        (
                        Parameter(data='Answer this...', role='instruction prompt', requires_grad=True),
                        [Variable(data='The instruction prompt must...', role='gradient for instruction prompt')]
                        )
                    ]
                }
            },
            'param_groups': [
                {
                    'model_client': {'class_type': 'AsyncOpenAI'},
                    'messages': [
                        {
                            'role': 'system',
                            'content': [Variable(data='You are part of an optimization system...', role='optimizer system prompt', requires_grad=False)]
                        },
                        {
                            'role': 'user',
                            'content': [Variable(data='Here is the variable you need...', role='optimizer user prompt', requires_grad=False)]
                        }
                    ],
                    'inputs': {},
                    'constraints': [],
                    'momentum': 2,
                    'completion_args': {'model': 'gpt-4o'},
                    'params': [0, 1]
                }
            ]
        }
        ```
    """  # noqa: E501

    # Save order indices instead of Variables
    param_mappings: Dict[int, int] = {}
    start_index = 0

    def pack_group(group: Dict[str, Any]) -> Dict[str, Any]:
        nonlocal start_index
        packed = {k: v for k, v in group.items() if k != "params"}

        # Custom serialization for model clients
        for key, value in packed.items():
            if isinstance(value, BaseModel):
                packed[key] = deepcopy(value)  # Trigger custom __deepcopy__

        param_mappings.update(
            {
                id(p): i
                for i, p in enumerate(group["params"], start_index)
                if id(p) not in param_mappings
            }
        )
        packed["params"] = [param_mappings[id(p)] for p in group["params"]]
        start_index += len(packed["params"])
        return packed

    param_groups = [pack_group(g) for g in self.param_groups]
    # Remap state to use order indices as keys
    packed_state = {
        (param_mappings[id(k)] if isinstance(k, Variable) else k): v
        for k, v in self.state.items()
    }

    state_dict = {
        "state": packed_state,
        "param_groups": param_groups,
    }

    return state_dict

load_state_dict(state_dict, model_clients=None)

Loads the optimizer state.

Parameters:

Name Type Description Default
state_dict StateDict

Optimizer state. Should be an object returned from a call to state_dict.

required
model_clients dict[str, BaseModel]

A dictionary mapping model client keys (e.g., 'fw_model_client') to their respective instances of BaseModel. These instances will be used to reconstruct any model clients referenced within the optimizer state. If a required model client is missing, an error will be raised with instructions on how to provide the missing client.

None

Raises:

Type Description
ValueError

If the provided state_dict is invalid, such as when the parameter groups or their sizes do not match the current optimizer configuration.

ValueError

If a required model client is missing from the model_clients dictionary, with details about the expected model client type and key.

Examples:

>>> openai_client = AsyncOpenAI()
>>> optimizer.load_state_dict(saved_state_dict, model_clients={
...     'model_client': openai_client,
... })
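
Since only a model client's class_type and usage metadata survive serialization (see the state dict example above), restoring requires live client instances. A typical round trip, with MyOptimizer and its construction arguments as placeholders:

>>> sd = optimizer.state_dict()  # capture state
>>> new_optimizer = MyOptimizer(agent.parameters(), defaults)
>>> new_optimizer.load_state_dict(sd, model_clients={'model_client': AsyncOpenAI()})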
Source code in afnio/optim/optimizer.py
def load_state_dict(
    self, state_dict: StateDict, model_clients: Dict[str, BaseModel] = None
) -> None:
    """Loads the optimizer state.

    Args:
        state_dict: Optimizer state. Should be an object returned
            from a call to [`state_dict`][..state_dict].
        model_clients: A dictionary mapping model client keys
            (e.g., `'fw_model_client'`) to their respective instances of
            [`BaseModel`][afnio.models.model.BaseModel]. These instances
            will be used to reconstruct any model clients referenced within
            the optimizer state. If a required model client is missing, an
            error will be raised with instructions on how to provide the
            missing client.

    Raises:
        ValueError: If the provided `state_dict` is invalid, such as when the
            parameter groups or their sizes do not match the current optimizer
            configuration.
        ValueError: If a required model client is missing from the
            `model_clients` dictionary, with details about the expected
            model client type and key.

    Examples:
        >>> openai_client = AsyncOpenAI()
        >>> optimizer.load_state_dict(saved_state_dict, model_clients={
        ...     'model_client': openai_client,
        ... })
    """
    # shallow copy, to be consistent with module API
    state_dict = state_dict.copy()

    # Validate the state_dict
    groups = self.param_groups

    # Deepcopy as we write into saved_groups later to update state
    saved_groups = deepcopy(state_dict["param_groups"])

    if len(groups) != len(saved_groups):
        raise ValueError(
            "Loaded state dict has a different number of parameter groups."
        )
    param_lens = (len(g["params"]) for g in groups)
    saved_lens = (len(g["params"]) for g in saved_groups)
    if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
        raise ValueError(
            "Loaded state dict contains a parameter group "
            "that doesn't match the size of optimizer's group."
        )

    # Update the state
    id_map = dict(
        zip(
            chain.from_iterable(g["params"] for g in saved_groups),
            chain.from_iterable(g["params"] for g in groups),
        )
    )

    def _cast(param, value, param_id=None, param_groups=None, key=None):
        r"""Make a deep copy of value, casting all variables to device of param."""
        if isinstance(value, Variable):
            return Optimizer._process_value_according_to_param_policy(
                param, value, param_id, param_groups, key
            )
        elif isinstance(value, dict):
            return {
                k: _cast(
                    param, v, param_id=param_id, param_groups=param_groups, key=k
                )
                for k, v in value.items()
            }
        elif isinstance(value, Iterable):
            return type(value)(
                _cast(param, v, param_id=param_id, param_groups=param_groups)
                for v in value
            )
        else:
            return value

    # Copy state assigned to params (and cast variables to appropriate types).
    # State that is not assigned to params is copied as is (needed for
    # backward compatibility).
    state: DefaultDict[Variable, Dict[Any, Any]] = defaultdict(dict)
    for k, v in state_dict["state"].items():
        if k in id_map:
            param = id_map[k]
            state[param] = _cast(
                param, v, param_id=k, param_groups=state_dict["param_groups"]
            )
        else:
            state[k] = v

    # Update parameter groups, setting their 'params' value
    def update_group(
        group: Dict[str, Any], new_group: Dict[str, Any]
    ) -> Dict[str, Any]:
        new_group["params"] = group["params"]
        return new_group

    # Reconstruct model clients if needed
    model_clients = model_clients or {}
    for group in saved_groups:
        for key, value in group.items():
            if isinstance(value, dict) and "class_type" in value:
                cls_name = value["class_type"]
                cls = MODEL_REGISTRY.get(cls_name)
                if cls and issubclass(cls, BaseModel):
                    if key in model_clients:
                        # Use the provided model client instance
                        group[key] = model_clients[key]

                        # Add usage metadata to new model client instance
                        usage = value.get("usage", {})
                        group[key].update_usage(usage)
                    else:
                        raise ValueError(
                            f"Missing model client for '{key}' of expected type "
                            f"'{cls_name}'. Please provide an instance of "
                            f"'{cls_name}' using the `model_clients` input "
                            f"dictionary and retry "
                            f"the `load_state_dict()` operation."
                        )

    param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
    self.__setstate__({"state": state, "param_groups": param_groups})

clear_grad()

Resets the gradients of all optimized Variables by setting the grad attribute of each parameter to an empty list.
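
In a training loop, gradients are typically cleared at the start of each iteration, before the forward and backward passes (a sketch; agent, loss_fn, input, and target are placeholders):

>>> optimizer.clear_grad()  # resets local grads and notifies the server
>>> output = agent(input)
>>> loss = loss_fn(output, target)
>>> loss.backward()
>>> optimizer.step()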

Source code in afnio/optim/optimizer.py
def clear_grad(self) -> None:
    """Resets the gradients of all optimized [`Variable`][afnio.Variable]s
    by setting the [`grad`][afnio.Variable.grad] attribute of each parameter
    to an empty list.
    """
    self._on_clear_grad()
    for group in self.param_groups:
        for p in group["params"]:
            if p.grad:
                p.grad.clear()

step(closure=None)

step(closure: None = ...) -> None
step(closure: Callable[[], Tuple[Variable, Variable]]) -> Tuple[Variable, Variable]

Performs a single optimization step (parameter update).

Parameters:

Name Type Description Default
closure Callable

A closure that reevaluates the agent and returns the loss as a tuple containing a numerical score and a textual explanation. This closure is optional for most optimizers.

None
Note

Unless otherwise specified, this function should not modify the grad field of the parameters.

Note

Some optimization algorithms need to reevaluate the function multiple times, so you have to pass in a closure that allows them to recompute your agent. The closure should clear the gradients, compute the loss, and return it.

Examples:

Example of using a closure as described in the note above:

>>> for input, target in dataset:
...     def closure():
...         optimizer.clear_grad()
...         output = agent(input)
...         loss = loss_fn(output, target)
...         loss.backward()
...         return loss
...
...     optimizer.step(closure)
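
For optimizers that don't require a closure, the plain loop is equivalent (same placeholders as in the example above):

>>> for input, target in dataset:
...     optimizer.clear_grad()
...     loss = loss_fn(agent(input), target)
...     loss.backward()
...     optimizer.step()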
Source code in afnio/optim/optimizer.py
def step(
    self, closure: Optional[Callable[[], Tuple[Variable, Variable]]] = None
) -> Optional[Tuple[Variable, Variable]]:
    """Performs a single optimization step (parameter update).

    Args:
        closure (Callable, optional): A closure that reevaluates the agent and
            returns the loss as a tuple containing a numerical score and a textual
            explanation. This closure is optional for most optimizers.

    Note:
        Unless otherwise specified, this function should not modify the
        [`grad`][afnio.Variable.grad] field of the parameters.

    Note:
        Some optimization algorithms need to reevaluate the function multiple times,
        so you have to pass in a `closure` that allows them to recompute your agent.
        The closure should clear the gradients, compute the loss, and return it.

    Examples:
        Example of using a closure as described in the note above:
        >>> for input, target in dataset:
        ...     def closure():
        ...         optimizer.clear_grad()
        ...         output = agent(input)
        ...         loss = loss_fn(output, target)
        ...         loss.backward()
        ...         return loss
        ...
        ...     optimizer.step(closure)
    """
    # Set `_pending_data` for all parameters that will be optimized
    for group in self.param_groups:
        for p in group["params"]:
            p._pending_data = True
            logger.debug(
                f"Marked variable {p.variable_id!r} as pending for data update."
            )

    try:
        # Get the singleton websocket client
        _, ws_client = get_default_clients()

        payload = {
            "optimizer_id": self.optimizer_id,
        }
        response = run_in_background_loop(ws_client.call("run_step", payload))
        if "error" in response:
            raise RuntimeError(
                response["error"]["data"].get("exception", response["error"])
            )

        logger.debug(
            f"Optimization instantiated and shared with the server: "
            f"optimizer_id={self.optimizer_id!r}"
        )

        result = response.get("result", {})
        result_message = result.get("message")
        result_state = result.get("state", [])

        # Extract all variable_ids from the result_state
        # and wait for them to be registered in VARIABLE_REGISTRY
        all_var_ids = self._extract_variable_ids_from_state(result_state)
        for var_id in all_var_ids:
            _wait_for_variable(var_id)

        des_result_state = _deserialize_state(result_state)

        # Convert [param, grads] lists to (param, grads) tuples
        for state in des_result_state.values():
            if "momentum_buffer" in state:
                state["momentum_buffer"] = [
                    (
                        tuple(pair)
                        if isinstance(pair, list) and len(pair) == 2
                        else pair
                    )
                    for pair in state["momentum_buffer"]
                ]

        if result_message != "Optimizer step executed successfully.":
            raise RuntimeError(
                f"Server did not return any data for optimization operation: "
                f"payload={payload!r}, response={response!r}"
            )

        self.state = des_result_state

        logger.debug(
            f"Optimization executed successfully: "
            f"optimizer_id={self.optimizer_id!r}"
        )

    except Exception as e:
        logger.error(f"Failed to run optimization on the server: {e}")

        # Clear all pending data flags to avoid deadlocks
        for group in self.param_groups:
            for p in group["params"]:
                p._pending_data = False
                logger.debug(
                    f"Marked variable {p.variable_id!r} as not pending for data "
                    f"update after error."
                )

        raise

add_param_group(param_group)

Add a param group to the Optimizer's param_groups.

This can be useful when fine-tuning a pre-trained network, as frozen layers can be made trainable and added to the Optimizer as training progresses.

Parameters:

Name Type Description Default
param_group dict[str, Any]

Specifies what Variables should be optimized along with group specific optimization options.

required
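
For example, parameters unfrozen later in training can be added with their own options, inheriting anything unspecified from the optimizer's defaults (a sketch; new_params and the momentum option are placeholders):

>>> optimizer.add_param_group({'params': new_params, 'momentum': 3})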
Source code in afnio/optim/optimizer.py
def add_param_group(self, param_group: Dict[str, Any]) -> None:
    """Add a param group to the [`Optimizer`][..]'s `param_groups`.

    This can be useful when fine-tuning a pre-trained network, as frozen layers can
    be made trainable and added to the [`Optimizer`][..] as training progresses.

    Args:
        param_group: Specifies what Variables should be optimized along with
            group specific optimization options.
    """
    try:
        # Get the singleton websocket client
        _, ws_client = get_default_clients()

        messages = self.defaults.get("messages", [])
        payload = {
            "optimizer_id": self.optimizer_id,
            "messages": _serialize_arg(messages),
            "param_group": _serialize_arg(param_group),
        }
        response = run_in_background_loop(
            ws_client.call("add_param_group", payload)
        )
        if "error" in response:
            raise RuntimeError(
                response["error"]["data"].get("exception", response["error"])
            )

        logger.debug(
            f"Param group added and shared with the server: {param_group!r}"
        )

        result = response.get("result", {})
        param_group = result.get("param_group")

        if not param_group:
            raise RuntimeError(
                f"Server did not return a param_group "
                f"for payload: {payload!r}, response: {response!r}"
            )
        self.param_groups.append(_deserialize_output(param_group))
    except Exception as e:
        logger.error(f"Failed to add param group to the optimizer: {e}")
        raise