zencorn committed on
Commit
ac819bc
·
1 Parent(s): 1ff84db

add Agent Files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .pre-commit-config.yaml +46 -0
  2. .pylintrc +428 -0
  3. .readthedocs.yaml +15 -0
  4. LICENSE +201 -0
  5. MANIFEST.in +1 -0
  6. app.py +0 -0
  7. docs/en/Makefile +20 -0
  8. docs/en/_static/css/readthedocs.css +6 -0
  9. docs/en/_static/images/lagent_icon.png +0 -0
  10. docs/en/_static/images/robot.png +0 -0
  11. docs/en/_static/js/collapsed.js +1 -0
  12. docs/en/_static/js/table.js +31 -0
  13. docs/en/_templates/autoapi/index.rst +14 -0
  14. docs/en/_templates/autoapi/python/module.rst +112 -0
  15. docs/en/_templates/classtemplate.rst +14 -0
  16. docs/en/conf.py +108 -0
  17. docs/en/docutils.conf +2 -0
  18. docs/en/index.rst +40 -0
  19. docs/en/make.bat +36 -0
  20. docs/en/requirements.txt +4 -0
  21. docs/imgs/lagent_icon.png +0 -0
  22. docs/imgs/lagent_logo.png +0 -0
  23. docs/zh_cn/.readthedocs.yaml +15 -0
  24. docs/zh_cn/Makefile +20 -0
  25. docs/zh_cn/_static/css/readthedocs.css +6 -0
  26. docs/zh_cn/_static/images/lagent_icon.png +0 -0
  27. docs/zh_cn/_static/images/robot.png +0 -0
  28. docs/zh_cn/_static/js/collapsed.js +1 -0
  29. docs/zh_cn/_static/js/table.js +31 -0
  30. docs/zh_cn/_templates/autoapi/index.rst +14 -0
  31. docs/zh_cn/_templates/autoapi/python/module.rst +112 -0
  32. docs/zh_cn/_templates/classtemplate.rst +14 -0
  33. docs/zh_cn/conf.py +108 -0
  34. docs/zh_cn/cp_origin_docs.sh +9 -0
  35. docs/zh_cn/docutils.conf +2 -0
  36. docs/zh_cn/index.rst +39 -0
  37. docs/zh_cn/make.bat +36 -0
  38. examples/agent_api_web_demo.py +197 -0
  39. examples/model_cli_demo.py +63 -0
  40. examples/multi_agents_api_web_demo.py +198 -0
  41. examples/run_agent_lmdeploy.py +177 -0
  42. examples/run_agent_services.py +110 -0
  43. examples/run_async_agent_lmdeploy.py +70 -0
  44. examples/run_async_agent_lmdeploy_server.py +86 -0
  45. examples/run_async_agent_openai.py +61 -0
  46. examples/run_async_agent_vllm.py +58 -0
  47. examples/run_ray_async_agent_lmdeploy.py +65 -0
  48. lagent.egg-info/PKG-INFO +600 -0
  49. lagent.egg-info/SOURCES.txt +71 -0
  50. lagent.egg-info/dependency_links.txt +1 -0
.pre-commit-config.yaml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ exclude: ^(tests/data|scripts|ftdp/protocols|ftdp/template_configs|ftdp/tool_dicts)/
2
+ repos:
3
+ - repo: https://github.com/PyCQA/flake8
4
+ rev: 7.0.0
5
+ hooks:
6
+ - id: flake8
7
+ - repo: https://github.com/PyCQA/isort
8
+ rev: 5.13.2
9
+ hooks:
10
+ - id: isort
11
+ - repo: https://github.com/psf/black
12
+ rev: 22.8.0
13
+ hooks:
14
+ - id: black
15
+ args: ["--line-length", "119", "--skip-string-normalization"]
16
+ - repo: https://github.com/pre-commit/pre-commit-hooks
17
+ rev: v4.5.0
18
+ hooks:
19
+ - id: trailing-whitespace
20
+ - id: check-yaml
21
+ - id: end-of-file-fixer
22
+ - id: requirements-txt-fixer
23
+ - id: double-quote-string-fixer
24
+ - id: check-merge-conflict
25
+ - id: fix-encoding-pragma
26
+ args: ["--remove"]
27
+ - id: mixed-line-ending
28
+ args: ["--fix=lf"]
29
+ - repo: https://github.com/executablebooks/mdformat
30
+ rev: 0.7.17
31
+ hooks:
32
+ - id: mdformat
33
+ args: ["--number"]
34
+ additional_dependencies:
35
+ - mdformat-openmmlab
36
+ - mdformat_frontmatter
37
+ - linkify-it-py
38
+ - repo: https://github.com/codespell-project/codespell
39
+ rev: v2.2.6
40
+ hooks:
41
+ - id: codespell
42
+ - repo: https://github.com/asottile/pyupgrade
43
+ rev: v3.15.0
44
+ hooks:
45
+ - id: pyupgrade
46
+ args: ["--py36-plus"]
.pylintrc ADDED
@@ -0,0 +1,428 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This Pylint rcfile contains a best-effort configuration to uphold the
2
+ # best-practices and style described in the Google Python style guide:
3
+ # https://google.github.io/styleguide/pyguide.html
4
+ #
5
+ # Its canonical open-source location is:
6
+ # https://google.github.io/styleguide/pylintrc
7
+
8
+ [MASTER]
9
+
10
+ # Files or directories to be skipped. They should be base names, not paths.
11
+ ignore=third_party,storage
12
+
13
+ # Files or directories matching the regex patterns are skipped. The regex
14
+ # matches against base names, not paths.
15
+ ignore-patterns=
16
+
17
+ # Pickle collected data for later comparisons.
18
+ persistent=no
19
+
20
+ # List of plugins (as comma separated values of python modules names) to load,
21
+ # usually to register additional checkers.
22
+ load-plugins=
23
+
24
+ # Use multiple processes to speed up Pylint.
25
+ jobs=4
26
+
27
+ # Allow loading of arbitrary C extensions. Extensions are imported into the
28
+ # active Python interpreter and may run arbitrary code.
29
+ unsafe-load-any-extension=no
30
+
31
+
32
+ [MESSAGES CONTROL]
33
+
34
+ # Only show warnings with the listed confidence levels. Leave empty to show
35
+ # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
36
+ confidence=
37
+
38
+ # Enable the message, report, category or checker with the given id(s). You can
39
+ # either give multiple identifier separated by comma (,) or put this option
40
+ # multiple time (only on the command line, not in the configuration file where
41
+ # it should appear only once). See also the "--disable" option for examples.
42
+ #enable=
43
+
44
+ # Disable the message, report, category or checker with the given id(s). You
45
+ # can either give multiple identifiers separated by comma (,) or put this
46
+ # option multiple times (only on the command line, not in the configuration
47
+ # file where it should appear only once).You can also use "--disable=all" to
48
+ # disable everything first and then reenable specific checks. For example, if
49
+ # you want to run only the similarities checker, you can use "--disable=all
50
+ # --enable=similarities". If you want to run only the classes checker, but have
51
+ # no Warning level messages displayed, use "--disable=all --enable=classes
52
+ # --disable=W"
53
+ disable=abstract-method,
54
+ apply-builtin,
55
+ arguments-differ,
56
+ attribute-defined-outside-init,
57
+ backtick,
58
+ bad-option-value,
59
+ basestring-builtin,
60
+ buffer-builtin,
61
+ c-extension-no-member,
62
+ consider-using-enumerate,
63
+ cmp-builtin,
64
+ cmp-method,
65
+ coerce-builtin,
66
+ coerce-method,
67
+ delslice-method,
68
+ div-method,
69
+ duplicate-code,
70
+ eq-without-hash,
71
+ execfile-builtin,
72
+ file-builtin,
73
+ filter-builtin-not-iterating,
74
+ fixme,
75
+ getslice-method,
76
+ global-statement,
77
+ hex-method,
78
+ idiv-method,
79
+ implicit-str-concat,
80
+ import-error,
81
+ import-self,
82
+ import-star-module-level,
83
+ inconsistent-return-statements,
84
+ input-builtin,
85
+ intern-builtin,
86
+ invalid-str-codec,
87
+ locally-disabled,
88
+ long-builtin,
89
+ long-suffix,
90
+ map-builtin-not-iterating,
91
+ misplaced-comparison-constant,
92
+ missing-function-docstring,
93
+ metaclass-assignment,
94
+ next-method-called,
95
+ next-method-defined,
96
+ no-absolute-import,
97
+ no-else-break,
98
+ no-else-continue,
99
+ no-else-raise,
100
+ no-else-return,
101
+ no-init, # added
102
+ no-member,
103
+ no-name-in-module,
104
+ no-self-use,
105
+ nonzero-method,
106
+ oct-method,
107
+ old-division,
108
+ old-ne-operator,
109
+ old-octal-literal,
110
+ old-raise-syntax,
111
+ parameter-unpacking,
112
+ print-statement,
113
+ raising-string,
114
+ range-builtin-not-iterating,
115
+ raw_input-builtin,
116
+ rdiv-method,
117
+ reduce-builtin,
118
+ relative-import,
119
+ reload-builtin,
120
+ round-builtin,
121
+ setslice-method,
122
+ signature-differs,
123
+ standarderror-builtin,
124
+ suppressed-message,
125
+ sys-max-int,
126
+ too-few-public-methods,
127
+ too-many-ancestors,
128
+ too-many-arguments,
129
+ too-many-boolean-expressions,
130
+ too-many-branches,
131
+ too-many-instance-attributes,
132
+ too-many-locals,
133
+ too-many-nested-blocks,
134
+ too-many-public-methods,
135
+ too-many-return-statements,
136
+ too-many-statements,
137
+ trailing-newlines,
138
+ unichr-builtin,
139
+ unicode-builtin,
140
+ unnecessary-pass,
141
+ unpacking-in-except,
142
+ useless-else-on-loop,
143
+ useless-object-inheritance,
144
+ useless-suppression,
145
+ using-cmp-argument,
146
+ wrong-import-order,
147
+ xrange-builtin,
148
+ zip-builtin-not-iterating,
149
+
150
+
151
+ [REPORTS]
152
+
153
+ # Set the output format. Available formats are text, parseable, colorized, msvs
154
+ # (visual studio) and html. You can also give a reporter class, eg
155
+ # mypackage.mymodule.MyReporterClass.
156
+ output-format=colorized
157
+
158
+ # Tells whether to display a full report or only the messages
159
+ reports=no
160
+
161
+ # Python expression which should return a note less than 10 (10 is the highest
162
+ # note). You have access to the variables errors warning, statement which
163
+ # respectively contain the number of errors / warnings messages and the total
164
+ # number of statements analyzed. This is used by the global evaluation report
165
+ # (RP0004).
166
+ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
167
+
168
+ # Template used to display messages. This is a python new-style format string
169
+ # used to format the message information. See doc for all details
170
+ #msg-template=
171
+
172
+
173
+ [BASIC]
174
+
175
+ # Good variable names which should always be accepted, separated by a comma
176
+ good-names=main,_
177
+
178
+ # Bad variable names which should always be refused, separated by a comma
179
+ bad-names=
180
+
181
+ # Colon-delimited sets of names that determine each other's naming style when
182
+ # the name regexes allow several styles.
183
+ name-group=
184
+
185
+ # Include a hint for the correct naming format with invalid-name
186
+ include-naming-hint=no
187
+
188
+ # List of decorators that produce properties, such as abc.abstractproperty. Add
189
+ # to this list to register other decorators that produce valid properties.
190
+ property-classes=abc.abstractproperty,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl
191
+
192
+ # Regular expression matching correct function names
193
+ function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
194
+
195
+ # Regular expression matching correct variable names
196
+ variable-rgx=^[a-z][a-z0-9_]*$
197
+
198
+ # Regular expression matching correct constant names
199
+ const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
200
+
201
+ # Regular expression matching correct attribute names
202
+ attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
203
+
204
+ # Regular expression matching correct argument names
205
+ argument-rgx=^[a-z][a-z0-9_]*$
206
+
207
+ # Regular expression matching correct class attribute names
208
+ class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
209
+
210
+ # Regular expression matching correct inline iteration names
211
+ inlinevar-rgx=^[a-z][a-z0-9_]*$
212
+
213
+ # Regular expression matching correct class names
214
+ class-rgx=^_?[A-Z][a-zA-Z0-9]*$
215
+
216
+ # Regular expression matching correct module names
217
+ module-rgx=^(_?[a-z][a-z0-9_]*|__init__)$
218
+
219
+ # Regular expression matching correct method names
220
+ method-rgx=(?x)^(?:(?P<exempt>_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
221
+
222
+ # Regular expression which should only match function or class names that do
223
+ # not require a docstring.
224
+ no-docstring-rgx=(__.*__|main|test.*|.*test|.*Test)$
225
+
226
+ # Minimum line length for functions/classes that require docstrings, shorter
227
+ # ones are exempt.
228
+ docstring-min-length=10
229
+
230
+
231
+ [TYPECHECK]
232
+
233
+ # List of decorators that produce context managers, such as
234
+ # contextlib.contextmanager. Add to this list to register other decorators that
235
+ # produce valid context managers.
236
+ contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager
237
+
238
+ # Tells whether missing members accessed in mixin class should be ignored. A
239
+ # mixin class is detected if its name ends with "mixin" (case insensitive).
240
+ ignore-mixin-members=yes
241
+
242
+ # List of module names for which member attributes should not be checked
243
+ # (useful for modules/projects where namespaces are manipulated during runtime
244
+ # and thus existing member attributes cannot be deduced by static analysis. It
245
+ # supports qualified module names, as well as Unix pattern matching.
246
+ ignored-modules=
247
+
248
+ # List of class names for which member attributes should not be checked (useful
249
+ # for classes with dynamically set attributes). This supports the use of
250
+ # qualified names.
251
+ ignored-classes=optparse.Values,thread._local,_thread._local
252
+
253
+ # List of members which are set dynamically and missed by pylint inference
254
+ # system, and so shouldn't trigger E1101 when accessed. Python regular
255
+ # expressions are accepted.
256
+ generated-members=
257
+
258
+
259
+ [FORMAT]
260
+
261
+ # Maximum number of characters on a single line.
262
+ max-line-length=120
263
+
264
+ # TODO(https://github.com/PyCQA/pylint/issues/3352): Direct pylint to exempt
265
+ # lines made too long by directives to pytype.
266
+
267
+ # Regexp for a line that is allowed to be longer than the limit.
268
+ ignore-long-lines=(?x)(
269
+ ^\s*(\#\ )?<?https?://\S+>?$|
270
+ ^\s*(from\s+\S+\s+)?import\s+.+$)
271
+
272
+ # Allow the body of an if to be on the same line as the test if there is no
273
+ # else.
274
+ single-line-if-stmt=yes
275
+
276
+ # Maximum number of lines in a module
277
+ max-module-lines=99999
278
+
279
+ # String used as indentation unit. The internal Google style guide mandates 2
280
+ # spaces. Google's externally-published style guide says 4, consistent with
281
+ # PEP 8. Here, we use 2 spaces, for conformity with many open-sourced Google
282
+ # projects (like TensorFlow).
283
+ indent-string=' '
284
+
285
+ # Number of spaces of indent required inside a hanging or continued line.
286
+ indent-after-paren=4
287
+
288
+ # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
289
+ expected-line-ending-format=
290
+
291
+
292
+ [MISCELLANEOUS]
293
+
294
+ # List of note tags to take in consideration, separated by a comma.
295
+ notes=TODO
296
+
297
+
298
+ [STRING]
299
+
300
+ # This flag controls whether inconsistent-quotes generates a warning when the
301
+ # character used as a quote delimiter is used inconsistently within a module.
302
+ check-quote-consistency=yes
303
+
304
+
305
+ [VARIABLES]
306
+
307
+ # Tells whether we should check for unused import in __init__ files.
308
+ init-import=no
309
+
310
+ # A regular expression matching the name of dummy variables (i.e. expectedly
311
+ # not used).
312
+ dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
313
+
314
+ # List of additional names supposed to be defined in builtins. Remember that
315
+ # you should avoid to define new builtins when possible.
316
+ additional-builtins=
317
+
318
+ # List of strings which can identify a callback function by name. A callback
319
+ # name must start or end with one of those strings.
320
+ callbacks=cb_,_cb
321
+
322
+ # List of qualified module names which can have objects that can redefine
323
+ # builtins.
324
+ redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools
325
+
326
+
327
+ [LOGGING]
328
+
329
+ # Logging modules to check that the string format arguments are in logging
330
+ # function parameter format
331
+ logging-modules=logging,absl.logging,tensorflow.io.logging
332
+
333
+
334
+ [SIMILARITIES]
335
+
336
+ # Minimum lines number of a similarity.
337
+ min-similarity-lines=4
338
+
339
+ # Ignore comments when computing similarities.
340
+ ignore-comments=yes
341
+
342
+ # Ignore docstrings when computing similarities.
343
+ ignore-docstrings=yes
344
+
345
+ # Ignore imports when computing similarities.
346
+ ignore-imports=no
347
+
348
+
349
+ [SPELLING]
350
+
351
+ # Spelling dictionary name. Available dictionaries: none. To make it work,
352
+ # install the python-enchant package.
353
+ spelling-dict=
354
+
355
+ # List of comma separated words that should not be checked.
356
+ spelling-ignore-words=
357
+
358
+ # A path to a file that contains private dictionary; one word per line.
359
+ spelling-private-dict-file=
360
+
361
+ # Tells whether to store unknown words to indicated private dictionary in
362
+ # --spelling-private-dict-file option instead of raising a message.
363
+ spelling-store-unknown-words=no
364
+
365
+
366
+ [IMPORTS]
367
+
368
+ # Deprecated modules which should not be used, separated by a comma
369
+ deprecated-modules=regsub,
370
+ TERMIOS,
371
+ Bastion,
372
+ rexec,
373
+ sets
374
+
375
+ # Create a graph of every (i.e. internal and external) dependencies in the
376
+ # given file (report RP0402 must not be disabled)
377
+ import-graph=
378
+
379
+ # Create a graph of external dependencies in the given file (report RP0402 must
380
+ # not be disabled)
381
+ ext-import-graph=
382
+
383
+ # Create a graph of internal dependencies in the given file (report RP0402 must
384
+ # not be disabled)
385
+ int-import-graph=
386
+
387
+ # Force import order to recognize a module as part of the standard
388
+ # compatibility libraries.
389
+ known-standard-library=
390
+
391
+ # Force import order to recognize a module as part of a third party library.
392
+ known-third-party=enchant, absl
393
+
394
+ # Analyse import fallback blocks. This can be used to support both Python 2 and
395
+ # 3 compatible code, which means that the block might have code that exists
396
+ # only in one or another interpreter, leading to false positives when analysed.
397
+ analyse-fallback-blocks=no
398
+
399
+
400
+ [CLASSES]
401
+
402
+ # List of method names used to declare (i.e. assign) instance attributes.
403
+ defining-attr-methods=__init__,
404
+ __new__,
405
+ setUp
406
+
407
+ # List of member names, which should be excluded from the protected access
408
+ # warning.
409
+ exclude-protected=_asdict,
410
+ _fields,
411
+ _replace,
412
+ _source,
413
+ _make
414
+
415
+ # List of valid names for the first argument in a class method.
416
+ valid-classmethod-first-arg=cls,
417
+ class_
418
+
419
+ # List of valid names for the first argument in a metaclass class method.
420
+ valid-metaclass-classmethod-first-arg=mcs
421
+
422
+
423
+ [EXCEPTIONS]
424
+
425
+ # Exceptions that will emit a warning when being caught. Defaults to
426
+ # "Exception"
427
+ overgeneral-exceptions=builtins.BaseException,
428
+ builtins.Exception
.readthedocs.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 2
2
+
3
+ formats: all
4
+
5
+ build:
6
+ os: ubuntu-22.04
7
+ tools:
8
+ python: "3.10"
9
+
10
+ python:
11
+ install:
12
+ - requirements: requirements/docs.txt
13
+
14
+ sphinx:
15
+ configuration: docs/en/conf.py
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
MANIFEST.in ADDED
@@ -0,0 +1 @@
 
 
1
+ include requirements/*.txt
app.py ADDED
File without changes
docs/en/Makefile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line, and also
5
+ # from the environment for the first two.
6
+ SPHINXOPTS ?=
7
+ SPHINXBUILD ?= sphinx-build
8
+ SOURCEDIR = .
9
+ BUILDDIR = _build
10
+
11
+ # Put it first so that "make" without argument is like "make help".
12
+ help:
13
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14
+
15
+ .PHONY: help Makefile
16
+
17
+ # Catch-all target: route all unknown targets to Sphinx using the new
18
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19
+ %: Makefile
20
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
docs/en/_static/css/readthedocs.css ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ .header-logo {
2
+ background-image: url("../images/lagent_icon.png");
3
+ background-size: 40px 40px;
4
+ height: 40px;
5
+ width: 40px;
6
+ }
docs/en/_static/images/lagent_icon.png ADDED
docs/en/_static/images/robot.png ADDED
docs/en/_static/js/collapsed.js ADDED
@@ -0,0 +1 @@
 
 
1
+ var collapsedSections = ['API Reference']
docs/en/_static/js/table.js ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ $(document).ready(function () {
2
+ table = $('.model-summary').DataTable({
3
+ "stateSave": false,
4
+ "lengthChange": false,
5
+ "pageLength": 10,
6
+ "order": [],
7
+ "scrollX": true,
8
+ "columnDefs": [
9
+ { "type": "summary", targets: '_all' },
10
+ ]
11
+ });
12
+ // Override the default sorting for the summary columns, which
13
+ // never takes the "-" character into account.
14
+ jQuery.extend(jQuery.fn.dataTableExt.oSort, {
15
+ "summary-asc": function (str1, str2) {
16
+ if (str1 == "<p>-</p>")
17
+ return 1;
18
+ if (str2 == "<p>-</p>")
19
+ return -1;
20
+ return ((str1 < str2) ? -1 : ((str1 > str2) ? 1 : 0));
21
+ },
22
+
23
+ "summary-desc": function (str1, str2) {
24
+ if (str1 == "<p>-</p>")
25
+ return 1;
26
+ if (str2 == "<p>-</p>")
27
+ return -1;
28
+ return ((str1 < str2) ? 1 : ((str1 > str2) ? -1 : 0));
29
+ }
30
+ });
31
+ })
docs/en/_templates/autoapi/index.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ API Reference
2
+ =============
3
+
4
+ This page contains auto-generated API reference documentation.
5
+
6
+ .. toctree::
7
+ :titlesonly:
8
+ :maxdepth: 3
9
+
10
+ {% for page in pages %}
11
+ {% if page.top_level_object and page.display %}
12
+ {{ page.include_path }}
13
+ {% endif %}
14
+ {% endfor %}
docs/en/_templates/autoapi/python/module.rst ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% if not obj.display %}
2
+ :orphan:
3
+
4
+ {% endif %}
5
+ :py:mod:`{{ obj.name if obj.name.count(".") <= 1 else obj.short_name }}`
6
+ =========={{ "=" * (obj.name|length if obj.name.count(".") <= 1 else obj.short_name|length) }}
7
+
8
+ .. py:module:: {{ obj.name }}
9
+
10
+ {% if obj.docstring %}
11
+ .. autoapi-nested-parse::
12
+
13
+ {{ obj.docstring|indent(3) }}
14
+
15
+ {% endif %}
16
+
17
+ {% block subpackages %}
18
+ {% set visible_subpackages = obj.subpackages|selectattr("display")|list %}
19
+ {% if visible_subpackages %}
20
+ Subpackages
21
+ -----------
22
+ .. toctree::
23
+ :titlesonly:
24
+ :maxdepth: 3
25
+
26
+ {% for subpackage in visible_subpackages %}
27
+ {{ subpackage.short_name }}/index.rst
28
+ {% endfor %}
29
+
30
+
31
+ {% endif %}
32
+ {% endblock %}
33
+ {% block submodules %}
34
+ {% set visible_submodules = obj.submodules|selectattr("display")|list %}
35
+ {% if visible_submodules %}
36
+ Submodules
37
+ ----------
38
+ .. toctree::
39
+ :titlesonly:
40
+ :maxdepth: 1
41
+
42
+ {% for submodule in visible_submodules %}
43
+ {{ submodule.short_name }}/index.rst
44
+ {% endfor %}
45
+
46
+
47
+ {% endif %}
48
+ {% endblock %}
49
+ {% block content %}
50
+ {% if obj.type is equalto("package") %}
51
+ {% set visible_children = obj.children|selectattr("display")|list %}
52
+ {% else %}
53
+ {% set visible_children = obj.children|selectattr("display")|rejectattr("imported")|list %}
54
+ {% endif %}
55
+ {% if visible_children %}
56
+ {{ obj.type|title }} Contents
57
+ {{ "-" * obj.type|length }}---------
58
+
59
+ {% set visible_classes = visible_children|selectattr("type", "equalto", "class")|list %}
60
+ {% set visible_functions = visible_children|selectattr("type", "equalto", "function")|list %}
61
+ {% set visible_attributes = visible_children|selectattr("type", "equalto", "data")|list %}
62
+ {% if "show-module-summary" in autoapi_options and (visible_classes or visible_functions) %}
63
+ {% block classes scoped %}
64
+ {% if visible_classes %}
65
+ Classes
66
+ ~~~~~~~
67
+
68
+ .. autoapisummary::
69
+
70
+ {% for klass in visible_classes %}
71
+ {{ klass.id }}
72
+ {% endfor %}
73
+
74
+
75
+ {% endif %}
76
+ {% endblock %}
77
+
78
+ {% block functions scoped %}
79
+ {% if visible_functions %}
80
+ Functions
81
+ ~~~~~~~~~
82
+
83
+ .. autoapisummary::
84
+
85
+ {% for function in visible_functions %}
86
+ {{ function.id }}
87
+ {% endfor %}
88
+
89
+
90
+ {% endif %}
91
+ {% endblock %}
92
+
93
+ {% block attributes scoped %}
94
+ {% if visible_attributes %}
95
+ Attributes
96
+ ~~~~~~~~~~
97
+
98
+ .. autoapisummary::
99
+
100
+ {% for attribute in visible_attributes %}
101
+ {{ attribute.id }}
102
+ {% endfor %}
103
+
104
+
105
+ {% endif %}
106
+ {% endblock %}
107
+ {% endif %}
108
+ {% for obj_item in visible_children %}
109
+ {{ obj_item.render()|indent(0) }}
110
+ {% endfor %}
111
+ {% endif %}
112
+ {% endblock %}
docs/en/_templates/classtemplate.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. role:: hidden
2
+ :class: hidden-section
3
+ .. currentmodule:: {{ module }}
4
+
5
+
6
+ {{ name | underline}}
7
+
8
+ .. autoclass:: {{ name }}
9
+ :members:
10
+
11
+
12
+ ..
13
+ autogenerated from source/_templates/classtemplate.rst
14
+ note it does not have :inherited-members:
docs/en/conf.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Configuration file for the Sphinx documentation builder.
2
+ #
3
+ # This file only contains a selection of the most common options. For a full
4
+ # list see the documentation:
5
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
+
7
+ # -- Path setup --------------------------------------------------------------
8
+
9
+ # If extensions (or modules to document with autodoc) are in another directory,
10
+ # add these directories to sys.path here. If the directory is relative to the
11
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
12
+
13
+ import os
14
+ import re
15
+ import sys
16
+
17
+ sys.path.insert(0, os.path.abspath('../..'))
18
+
19
+ # -- Project information -----------------------------------------------------
20
+ project = 'Lagent'
21
+ copyright = '2020-2030, InternLM'
22
+ author = 'InternLM'
23
+ language = 'en'
24
+
25
+ # The full version, including alpha/beta/rc tags
26
+ version_file = '../../lagent/version.py'
27
+ with open(version_file) as f:
28
+ exec(compile(f.read(), version_file, 'exec'))
29
+ __version__ = locals()['__version__']
30
+ release = __version__
31
+
32
+ # -- General configuration ---------------------------------------------------
33
+
34
+ # Add any Sphinx extension module names here, as strings. They can be
35
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
36
+ # ones.
37
+ extensions = [
38
+ 'sphinx_rtd_theme',
39
+ 'myst_nb',
40
+ 'autoapi.extension',
41
+ 'sphinx_markdown_tables',
42
+ 'sphinx.ext.autodoc',
43
+ 'sphinx.ext.napoleon',
44
+ 'sphinx.ext.viewcode',
45
+ ]
46
+
47
+ nb_output_stderr = 'remove-warn'
48
+ autodoc_typehints = 'description'
49
+
50
+ # sphinx-autoapi configuration
51
+ autoapi_dirs = ['../../lagent']
52
+ autoapi_options = [
53
+ 'members',
54
+ 'undoc-members',
55
+ 'show-inheritance',
56
+ 'show-module-summary',
57
+ ]
58
+ autoapi_ignore = ['*migrations*', '*command.py', '*cli.py']
59
+ autoapi_template_dir = '_templates/autoapi'
60
+ autoapi_add_toctree_entry = False
61
+
62
+ # Add any paths that contain templates here, relative to this directory.
63
+ templates_path = ['_templates']
64
+
65
+ # List of patterns, relative to source directory, that match files and
66
+ # directories to ignore when looking for source files.
67
+ # This pattern also affects html_static_path and html_extra_path.
68
+ exclude_patterns = []
69
+
70
+ # -- Options for HTML output -------------------------------------------------
71
+
72
+ # The theme to use for HTML and HTML Help pages. See the documentation for
73
+ # a list of builtin themes.
74
+ #
75
+ html_theme = 'sphinx_rtd_theme'
76
+ html_theme_options = {
77
+ 'navigation_depth': 3,
78
+ 'titles_only': False,
79
+ 'style_nav_header_background': '#4fabab',
80
+ }
81
+ html_context = {
82
+ 'display_github': True,
83
+ 'github_host': 'github.com',
84
+ 'github_user': 'InternLM',
85
+ 'github_repo': 'lagent',
86
+ 'github_version': 'main',
87
+ 'conf_py_path': '/docs/en/',
88
+ }
89
+ html_title = 'Lagent'
90
+ html_logo = '../imgs/lagent_logo.png'
91
+ html_favicon = '../imgs/lagent_icon.png'
92
+
93
+ master_doc = 'index'
94
+
95
+ # Add any paths that contain custom static files (such as style sheets) here,
96
+ # relative to this directory. They are copied after the builtin static files,
97
+ # so a file named 'default.css' will overwrite the builtin 'default.css'.
98
+ html_static_path = ['_static']
99
+
100
+
101
+ def custom_skip(app, what, name, obj, skip, options):
102
+ if what in ['data', 'function', 'class'] and re.search('logger', name):
103
+ skip = True
104
+ return skip
105
+
106
+
107
+ def setup(sphinx):
108
+ sphinx.connect('autoapi-skip-member', custom_skip)
docs/en/docutils.conf ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [html writers]
2
+ table_style: colwidths-auto
docs/en/index.rst ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Welcome to Lagent's documentation!
2
+ =======================================
3
+
4
+ You can switch between English and Chinese in the lower-left corner of the layout.
5
+
6
+ .. toctree::
7
+ :maxdepth: 2
8
+ :caption: Get Started
9
+
10
+ get_started/install.md
11
+ get_started/quickstart.md
12
+
13
+ .. toctree::
14
+ :maxdepth: 2
15
+ :caption: Tutorials
16
+
17
+ tutorials/action.md
18
+
19
+ .. toctree::
20
+ :caption: Switch Language
21
+
22
+ switch_language.md
23
+
24
+ .. toctree::
25
+ :maxdepth: 1
26
+ :caption: API Reference
27
+
28
+ autoapi/lagent/actions/index
29
+ autoapi/lagent/agents/index
30
+ autoapi/lagent/llms/index
31
+ autoapi/lagent/utils/index
32
+ autoapi/lagent/schema/index
33
+ autoapi/lagent/version/index
34
+
35
+
36
+ Indices and tables
37
+ ==================
38
+
39
+ * :ref:`genindex`
40
+ * :ref:`search`
docs/en/make.bat ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @ECHO OFF
2
+
3
+ pushd %~dp0
4
+
5
+ REM Command file for Sphinx documentation
6
+
7
+ if "%SPHINXBUILD%" == "" (
8
+ set SPHINXBUILD=sphinx-build
9
+ )
10
+ set SOURCEDIR=.
11
+ set BUILDDIR=_build
12
+
13
+ if "%1" == "" goto help
14
+
15
+ %SPHINXBUILD% >NUL 2>NUL
16
+ if errorlevel 9009 (
17
+ echo.
18
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19
+ echo.installed, then set the SPHINXBUILD environment variable to point
20
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
21
+ echo.may add the Sphinx directory to PATH.
22
+ echo.
23
+ echo.If you don't have Sphinx installed, grab it from
24
+ echo.http://sphinx-doc.org/
25
+ exit /b 1
26
+ )
27
+
28
+
29
+ %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
30
+ goto end
31
+
32
+ :help
33
+ %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
34
+
35
+ :end
36
+ popd
docs/en/requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ recommonmark
2
+ sphinx
3
+ sphinx_markdown_tables
4
+ sphinx_rtd_theme
docs/imgs/lagent_icon.png ADDED
docs/imgs/lagent_logo.png ADDED
docs/zh_cn/.readthedocs.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 2
2
+
3
+ formats: all
4
+
5
+ build:
6
+ os: ubuntu-22.04
7
+ tools:
8
+ python: "3.10"
9
+
10
+ python:
11
+ install:
12
+ - requirements: requirements/docs.txt
13
+
14
+ sphinx:
15
+ configuration: docs/zh_cn/conf.py
docs/zh_cn/Makefile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line, and also
5
+ # from the environment for the first two.
6
+ SPHINXOPTS ?=
7
+ SPHINXBUILD ?= sphinx-build
8
+ SOURCEDIR = .
9
+ BUILDDIR = _build
10
+
11
+ # Put it first so that "make" without argument is like "make help".
12
+ help:
13
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14
+
15
+ .PHONY: help Makefile
16
+
17
+ # Catch-all target: route all unknown targets to Sphinx using the new
18
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19
+ %: Makefile
20
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
docs/zh_cn/_static/css/readthedocs.css ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ .header-logo {
2
+ background-image: url("../images/lagent_icon.png");
3
+ background-size: 40px 40px;
4
+ height: 40px;
5
+ width: 40px;
6
+ }
docs/zh_cn/_static/images/lagent_icon.png ADDED
docs/zh_cn/_static/images/robot.png ADDED
docs/zh_cn/_static/js/collapsed.js ADDED
@@ -0,0 +1 @@
 
 
1
+ var collapsedSections = ['API 文档']
docs/zh_cn/_static/js/table.js ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ $(document).ready(function () {
2
+ table = $('.model-summary').DataTable({
3
+ "stateSave": false,
4
+ "lengthChange": false,
5
+ "pageLength": 10,
6
+ "order": [],
7
+ "scrollX": true,
8
+ "columnDefs": [
9
+ { "type": "summary", targets: '_all' },
10
+ ]
11
+ });
12
+ // Override the default sorting for the summary columns, which
13
+ // never takes the "-" character into account.
14
+ jQuery.extend(jQuery.fn.dataTableExt.oSort, {
15
+ "summary-asc": function (str1, str2) {
16
+ if (str1 == "<p>-</p>")
17
+ return 1;
18
+ if (str2 == "<p>-</p>")
19
+ return -1;
20
+ return ((str1 < str2) ? -1 : ((str1 > str2) ? 1 : 0));
21
+ },
22
+
23
+ "summary-desc": function (str1, str2) {
24
+ if (str1 == "<p>-</p>")
25
+ return 1;
26
+ if (str2 == "<p>-</p>")
27
+ return -1;
28
+ return ((str1 < str2) ? 1 : ((str1 > str2) ? -1 : 0));
29
+ }
30
+ });
31
+ })
docs/zh_cn/_templates/autoapi/index.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ API Reference
2
+ =============
3
+
4
+ This page contains auto-generated API reference documentation.
5
+
6
+ .. toctree::
7
+ :titlesonly:
8
+ :maxdepth: 3
9
+
10
+ {% for page in pages %}
11
+ {% if page.top_level_object and page.display %}
12
+ {{ page.include_path }}
13
+ {% endif %}
14
+ {% endfor %}
docs/zh_cn/_templates/autoapi/python/module.rst ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% if not obj.display %}
2
+ :orphan:
3
+
4
+ {% endif %}
5
+ :py:mod:`{{ obj.name if obj.name.count(".") <= 1 else obj.short_name }}`
6
+ =========={{ "=" * (obj.name|length if obj.name.count(".") <= 1 else obj.short_name|length) }}
7
+
8
+ .. py:module:: {{ obj.name }}
9
+
10
+ {% if obj.docstring %}
11
+ .. autoapi-nested-parse::
12
+
13
+ {{ obj.docstring|indent(3) }}
14
+
15
+ {% endif %}
16
+
17
+ {% block subpackages %}
18
+ {% set visible_subpackages = obj.subpackages|selectattr("display")|list %}
19
+ {% if visible_subpackages %}
20
+ Subpackages
21
+ -----------
22
+ .. toctree::
23
+ :titlesonly:
24
+ :maxdepth: 3
25
+
26
+ {% for subpackage in visible_subpackages %}
27
+ {{ subpackage.short_name }}/index.rst
28
+ {% endfor %}
29
+
30
+
31
+ {% endif %}
32
+ {% endblock %}
33
+ {% block submodules %}
34
+ {% set visible_submodules = obj.submodules|selectattr("display")|list %}
35
+ {% if visible_submodules %}
36
+ Submodules
37
+ ----------
38
+ .. toctree::
39
+ :titlesonly:
40
+ :maxdepth: 1
41
+
42
+ {% for submodule in visible_submodules %}
43
+ {{ submodule.short_name }}/index.rst
44
+ {% endfor %}
45
+
46
+
47
+ {% endif %}
48
+ {% endblock %}
49
+ {% block content %}
50
+ {% if obj.type is equalto("package") %}
51
+ {% set visible_children = obj.children|selectattr("display")|list %}
52
+ {% else %}
53
+ {% set visible_children = obj.children|selectattr("display")|rejectattr("imported")|list %}
54
+ {% endif %}
55
+ {% if visible_children %}
56
+ {{ obj.type|title }} Contents
57
+ {{ "-" * obj.type|length }}---------
58
+
59
+ {% set visible_classes = visible_children|selectattr("type", "equalto", "class")|list %}
60
+ {% set visible_functions = visible_children|selectattr("type", "equalto", "function")|list %}
61
+ {% set visible_attributes = visible_children|selectattr("type", "equalto", "data")|list %}
62
+ {% if "show-module-summary" in autoapi_options and (visible_classes or visible_functions) %}
63
+ {% block classes scoped %}
64
+ {% if visible_classes %}
65
+ Classes
66
+ ~~~~~~~
67
+
68
+ .. autoapisummary::
69
+
70
+ {% for klass in visible_classes %}
71
+ {{ klass.id }}
72
+ {% endfor %}
73
+
74
+
75
+ {% endif %}
76
+ {% endblock %}
77
+
78
+ {% block functions scoped %}
79
+ {% if visible_functions %}
80
+ Functions
81
+ ~~~~~~~~~
82
+
83
+ .. autoapisummary::
84
+
85
+ {% for function in visible_functions %}
86
+ {{ function.id }}
87
+ {% endfor %}
88
+
89
+
90
+ {% endif %}
91
+ {% endblock %}
92
+
93
+ {% block attributes scoped %}
94
+ {% if visible_attributes %}
95
+ Attributes
96
+ ~~~~~~~~~~
97
+
98
+ .. autoapisummary::
99
+
100
+ {% for attribute in visible_attributes %}
101
+ {{ attribute.id }}
102
+ {% endfor %}
103
+
104
+
105
+ {% endif %}
106
+ {% endblock %}
107
+ {% endif %}
108
+ {% for obj_item in visible_children %}
109
+ {{ obj_item.render()|indent(0) }}
110
+ {% endfor %}
111
+ {% endif %}
112
+ {% endblock %}
docs/zh_cn/_templates/classtemplate.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. role:: hidden
2
+ :class: hidden-section
3
+ .. currentmodule:: {{ module }}
4
+
5
+
6
+ {{ name | underline}}
7
+
8
+ .. autoclass:: {{ name }}
9
+ :members:
10
+
11
+
12
+ ..
13
+ autogenerated from source/_templates/classtemplate.rst
14
+ note it does not have :inherited-members:
docs/zh_cn/conf.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Configuration file for the Sphinx documentation builder.
2
+ #
3
+ # This file only contains a selection of the most common options. For a full
4
+ # list see the documentation:
5
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
+
7
+ # -- Path setup --------------------------------------------------------------
8
+
9
+ # If extensions (or modules to document with autodoc) are in another directory,
10
+ # add these directories to sys.path here. If the directory is relative to the
11
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
12
+
13
+ import os
14
+ import re
15
+ import sys
16
+
17
+ sys.path.insert(0, os.path.abspath('../..'))
18
+
19
+ # -- Project information -----------------------------------------------------
20
+ project = 'Lagent'
21
+ copyright = '2020-2030, InternLM'
22
+ author = 'InternLM'
23
+ language = 'zh_CN'
24
+
25
+ # The full version, including alpha/beta/rc tags
26
+ version_file = '../../lagent/version.py'
27
+ with open(version_file) as f:
28
+ exec(compile(f.read(), version_file, 'exec'))
29
+ __version__ = locals()['__version__']
30
+ release = __version__
31
+
32
+ # -- General configuration ---------------------------------------------------
33
+
34
+ # Add any Sphinx extension module names here, as strings. They can be
35
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
36
+ # ones.
37
+ extensions = [
38
+ 'sphinx_rtd_theme',
39
+ 'myst_nb',
40
+ 'autoapi.extension',
41
+ 'sphinx_markdown_tables',
42
+ 'sphinx.ext.autodoc',
43
+ 'sphinx.ext.napoleon',
44
+ 'sphinx.ext.viewcode',
45
+ ]
46
+
47
+ nb_output_stderr = 'remove-warn'
48
+ autodoc_typehints = 'description'
49
+
50
+ # sphinx-autoapi configuration
51
+ autoapi_dirs = ['../../lagent']
52
+ autoapi_options = [
53
+ 'members',
54
+ 'undoc-members',
55
+ 'show-inheritance',
56
+ 'show-module-summary',
57
+ ]
58
+ autoapi_ignore = ['*migrations*', '*command.py', '*cli.py']
59
+ autoapi_template_dir = '_templates/autoapi'
60
+ autoapi_add_toctree_entry = False
61
+
62
+ # Add any paths that contain templates here, relative to this directory.
63
+ templates_path = ['_templates']
64
+
65
+ # List of patterns, relative to source directory, that match files and
66
+ # directories to ignore when looking for source files.
67
+ # This pattern also affects html_static_path and html_extra_path.
68
+ exclude_patterns = []
69
+
70
+ # -- Options for HTML output -------------------------------------------------
71
+
72
+ # The theme to use for HTML and HTML Help pages. See the documentation for
73
+ # a list of builtin themes.
74
+ #
75
+ html_theme = 'sphinx_rtd_theme'
76
+ html_theme_options = {
77
+ 'navigation_depth': 3,
78
+ 'titles_only': False,
79
+ 'style_nav_header_background': '#4fabab',
80
+ }
81
+ html_context = {
82
+ 'display_github': True,
83
+ 'github_host': 'github.com',
84
+ 'github_user': 'InternLM',
85
+ 'github_repo': 'lagent',
86
+ 'github_version': 'main',
87
+ 'conf_py_path': '/docs/zh_cn/',
88
+ }
89
+ html_title = 'Lagent'
90
+ html_logo = '../imgs/lagent_logo.png'
91
+ html_favicon = '../imgs/lagent_icon.png'
92
+
93
+ master_doc = 'index'
94
+
95
+ # Add any paths that contain custom static files (such as style sheets) here,
96
+ # relative to this directory. They are copied after the builtin static files,
97
+ # so a file named 'default.css' will overwrite the builtin 'default.css'.
98
+ html_static_path = ['_static']
99
+
100
+
101
+ def custom_skip(app, what, name, obj, skip, options):
102
+ if what in ['data', 'function', 'class'] and re.search('logger', name):
103
+ skip = True
104
+ return skip
105
+
106
+
107
+ def setup(sphinx):
108
+ sphinx.connect('autoapi-skip-member', custom_skip)
docs/zh_cn/cp_origin_docs.sh ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+
3
+ # Copy *.md files from docs/ if it doesn't have a Chinese translation
4
+
5
+ for filename in $(find ../en/ -name '*.md' -printf "%P\n");
6
+ do
7
+ mkdir -p $(dirname $filename)
8
+ cp -n ../en/$filename ./$filename
9
+ done
docs/zh_cn/docutils.conf ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [html writers]
2
+ table_style: colwidths-auto
docs/zh_cn/index.rst ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 欢迎来到 Lagent 的中文文档!
2
+ =======================================
3
+
4
+ 您可以在页面左下角切换中英文文档。
5
+
6
+ .. toctree::
7
+ :maxdepth: 2
8
+ :caption: 新手入门
9
+
10
+ get_started/install.md
11
+
12
+ .. toctree::
13
+ :maxdepth: 2
14
+ :caption: 教程
15
+
16
+ tutorials/action.md
17
+
18
+ .. toctree::
19
+ :caption: 切换语言
20
+
21
+ switch_language.md
22
+
23
+ .. toctree::
24
+ :maxdepth: 1
25
+ :caption: API 参考
26
+
27
+ autoapi/lagent/actions/index
28
+ autoapi/lagent/agents/index
29
+ autoapi/lagent/llms/index
30
+ autoapi/lagent/utils/index
31
+ autoapi/lagent/schema/index
32
+ autoapi/lagent/version/index
33
+
34
+
35
+ 导引
36
+ ==================
37
+
38
+ * :ref:`genindex`
39
+ * :ref:`search`
docs/zh_cn/make.bat ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @ECHO OFF
2
+
3
+ pushd %~dp0
4
+
5
+ REM Command file for Sphinx documentation
6
+
7
+ if "%SPHINXBUILD%" == "" (
8
+ set SPHINXBUILD=sphinx-build
9
+ )
10
+ set SOURCEDIR=.
11
+ set BUILDDIR=_build
12
+
13
+ if "%1" == "" goto help
14
+
15
+ %SPHINXBUILD% >NUL 2>NUL
16
+ if errorlevel 9009 (
17
+ echo.
18
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19
+ echo.installed, then set the SPHINXBUILD environment variable to point
20
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
21
+ echo.may add the Sphinx directory to PATH.
22
+ echo.
23
+ echo.If you don't have Sphinx installed, grab it from
24
+ echo.http://sphinx-doc.org/
25
+ exit /b 1
26
+ )
27
+
28
+
29
+ %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
30
+ goto end
31
+
32
+ :help
33
+ %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
34
+
35
+ :end
36
+ popd
examples/agent_api_web_demo.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import os
3
+ from typing import List
4
+ import streamlit as st
5
+ # from lagent.actions import ArxivSearch
6
+ from lagent.actions import ArxivSearch, WeatherQuery
7
+ from lagent.prompts.parsers import PluginParser
8
+ from lagent.agents.stream import INTERPRETER_CN, META_CN, PLUGIN_CN, AgentForInternLM, get_plugin_prompt
9
+ from lagent.llms import GPTAPI
10
+
11
class SessionState:
    """Owns the Streamlit session state used by the web demo."""

    def init_state(self):
        """Populate every session-state key with its initial value."""
        # Conversation histories (assistant replies / user prompts).
        st.session_state['assistant'] = []
        st.session_state['user'] = []
        # Register all available plugins, keyed by plugin name.
        available_plugins = [ArxivSearch(), WeatherQuery()]
        st.session_state['plugin_map'] = {
            plugin.name: plugin
            for plugin in available_plugins
        }
        st.session_state['model_map'] = {}          # cached model instances
        st.session_state['model_selected'] = None   # currently chosen model
        st.session_state['plugin_actions'] = set()  # plugins active right now
        st.session_state['history'] = []            # full chat history
        st.session_state['api_base'] = None         # API base URL

    def clear_state(self):
        """Reset per-conversation state, leaving the plugin registry intact."""
        for key in ('assistant', 'user'):
            st.session_state[key] = []
        st.session_state['model_selected'] = None
35
+
36
+
37
class StreamlitUI:
    """Builds and manages the Streamlit web interface for the demo."""

    def __init__(self, session_state: SessionState):
        self.session_state = session_state
        self.plugin_action = []  # plugins currently selected in the sidebar
        # Default prompts; the plugin prompt is regenerated whenever the
        # sidebar plugin selection changes (see setup_sidebar).
        self.meta_prompt = META_CN
        self.plugin_prompt = PLUGIN_CN
        self.init_streamlit()

    def init_streamlit(self):
        """Apply page-level Streamlit configuration and render the header."""
        st.set_page_config(
            layout='wide',
            page_title='lagent-web',
            page_icon='./docs/imgs/lagent_icon.png'
        )
        st.header(':robot_face: :blue[Lagent] Web Demo ', divider='rainbow')

    def setup_sidebar(self):
        """Render the sidebar and collect the model/plugin configuration.

        Returns:
            tuple: ``(model_name, api_base, plugin_action)`` where
            ``plugin_action`` is the list of selected plugin instances.
        """
        # Text inputs for the model name and the API base URL.
        model_name = st.sidebar.text_input('模型名称:', value='internlm2.5-latest')

        # ============================ SiliconFlow API =============================
        # NOTE: when using the SiliconFlow API, the model name must be changed to
        # internlm/internlm2_5-7b-chat or internlm/internlm2_5-20b-chat.
        # api_base = st.sidebar.text_input(
        #     'API Base 地址:', value='https://api.siliconflow.cn/v1/chat/completions'
        # )
        # ============================ official InternLM API ======================
        api_base = st.sidebar.text_input(
            'API Base 地址:', value='https://internlm-chat.intern-ai.org.cn/puyu/api/v1/chat/completions'
        )
        # ==========================================================================
        # Plugin multi-select.
        plugin_name = st.sidebar.multiselect(
            '插件选择',
            options=list(st.session_state['plugin_map'].keys()),
            default=[],
        )

        # Resolve the selected plugin names back to plugin instances.
        self.plugin_action = [st.session_state['plugin_map'][name] for name in plugin_name]

        # Regenerate the plugin prompt only when at least one plugin is active;
        # otherwise the default PLUGIN_CN prompt stays in effect.
        if self.plugin_action:
            self.plugin_prompt = get_plugin_prompt(self.plugin_action)

        # "Clear conversation" button resets the per-session chat state.
        if st.sidebar.button('清空对话', key='clear'):
            self.session_state.clear_state()

        return model_name, api_base, self.plugin_action

    def initialize_chatbot(self, model_name, api_base, plugin_action):
        """Create a GPTAPI instance to use as the chatbot backend.

        Args:
            model_name (str): Model identifier passed to the remote API.
            api_base (str): Chat-completions endpoint URL.
            plugin_action (list): Selected plugin instances (not used here;
                the plugin prompt was already baked into ``self.plugin_prompt``).

        Returns:
            GPTAPI: Configured API-backed language model.
        """
        # The auth token must be supplied via the `token` environment variable.
        token = os.getenv("token")
        if not token:
            st.error("未检测到环境变量 `token`,请设置环境变量,例如 `export token='your_token_here'` 后重新运行 X﹏X")
            st.stop()  # halt the app until a token is provided

        # Build the meta template, injecting the (possibly plugin-specific)
        # prompts gathered from the sidebar.
        meta_prompt = [
            {"role": "system", "content": self.meta_prompt, "api_role": "system"},
            {"role": "user", "content": "", "api_role": "user"},
            {"role": "assistant", "content": self.plugin_prompt, "api_role": "assistant"},
            {"role": "environment", "content": "", "api_role": "environment"}
        ]

        api_model = GPTAPI(
            model_type=model_name,
            api_base=api_base,
            key=token,  # auth token read from the environment above
            meta_template=meta_prompt,
            max_new_tokens=512,
            temperature=0.8,
            top_p=0.9
        )
        return api_model

    def render_user(self, prompt: str):
        """Render a user message bubble."""
        with st.chat_message('user'):
            st.markdown(prompt)

    def render_assistant(self, agent_return):
        """Render an assistant message bubble.

        ``agent_return`` may be an agent result object (with a ``content``
        attribute) or any other value, which is stringified as a fallback.
        """
        with st.chat_message('assistant'):
            content = getattr(agent_return, "content", str(agent_return))
            st.markdown(content if isinstance(content, str) else str(content))
128
+
129
+
130
def main():
    """Entry point: run the Streamlit chat application."""
    # First run of the session: build the UI (which also calls
    # st.set_page_config). On reruns the page config must be re-applied
    # manually because StreamlitUI.__init__ is not executed again.
    if 'ui' not in st.session_state:
        session_state = SessionState()
        session_state.init_state()
        st.session_state['ui'] = StreamlitUI(session_state)
    else:
        st.set_page_config(
            layout='wide',
            page_title='lagent-web',
            page_icon='./docs/imgs/lagent_icon.png'
        )
        st.header(':robot_face: :blue[Lagent] Web Demo ', divider='rainbow')

    # Read the current sidebar configuration and derive the plugin specs
    # (dotted class paths) expected by AgentForInternLM.
    model_name, api_base, plugin_action = st.session_state['ui'].setup_sidebar()
    plugins = [dict(type=f"lagent.actions.{plugin.__class__.__name__}") for plugin in plugin_action]

    # Rebuild the chatbot/agent whenever the model, plugin selection or API
    # base changed since the previous rerun.
    if (
        'chatbot' not in st.session_state or
        model_name != st.session_state['chatbot'].model_type or
        'last_plugin_action' not in st.session_state or
        plugin_action != st.session_state['last_plugin_action'] or
        api_base != st.session_state['api_base']
    ):
        # Refresh the chatbot and remember the config it was built from.
        st.session_state['chatbot'] = st.session_state['ui'].initialize_chatbot(model_name, api_base, plugin_action)
        st.session_state['last_plugin_action'] = plugin_action  # remember plugin selection
        st.session_state['api_base'] = api_base  # remember API base URL

        # Recreate the agent around the fresh chatbot.
        st.session_state['agent'] = AgentForInternLM(
            llm=st.session_state['chatbot'],
            plugins=plugins,
            output_format=dict(
                type=PluginParser,
                template=PLUGIN_CN,
                prompt=get_plugin_prompt(plugin_action)
            )
        )
        # Reset the conversation history for the new agent.
        st.session_state['session_history'] = []

    if 'agent' not in st.session_state:
        st.session_state['agent'] = None

    agent = st.session_state['agent']
    # Replay the stored conversation so it survives Streamlit reruns.
    for prompt, agent_return in zip(st.session_state['user'], st.session_state['assistant']):
        st.session_state['ui'].render_user(prompt)
        st.session_state['ui'].render_assistant(agent_return)

    # Handle a new user message, if any.
    if user_input := st.chat_input(''):
        st.session_state['ui'].render_user(user_input)

        # Invoke the agent; the sidebar's system/plugin prompts were baked
        # into the chatbot when it was (re)initialized above.
        res = agent(user_input, session_id=0)
        st.session_state['ui'].render_assistant(res)

        # Persist the turn; deepcopy so later mutations of `res` don't
        # rewrite the stored history.
        st.session_state['user'].append(user_input)
        st.session_state['assistant'].append(copy.deepcopy(res))

    st.session_state['last_status'] = None


if __name__ == '__main__':
    main()
examples/model_cli_demo.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from argparse import ArgumentParser
2
+
3
+ from lagent.llms import HFTransformer
4
+ from lagent.llms.meta_template import INTERNLM2_META as META
5
+
6
+
7
def parse_args(argv=None):
    """Parse command-line options for the CLI chat demo.

    Args:
        argv (list[str] | None): Argument list to parse. ``None`` (the
            default) falls back to ``sys.argv[1:]``, preserving the
            original call-site behavior ``parse_args()``.

    Returns:
        argparse.Namespace: Parsed options with ``path`` (model path or
        HF hub id) and ``mode`` (``'chat'`` or ``'generate'``).
    """
    parser = ArgumentParser(description='chatbot')
    parser.add_argument(
        '--path',
        type=str,
        default='internlm/internlm2-chat-20b',
        help='The path to the model')
    parser.add_argument(
        '--mode',
        type=str,
        default='chat',
        # Restrict to the two values main() actually understands; any other
        # value was previously silently treated as 'chat'.
        choices=['chat', 'generate'],
        help='Completion through chat or generate')
    args = parser.parse_args(argv)
    return args
21
+
22
+
23
def main():
    """Run an interactive REPL against a HuggingFace Transformers model.

    Reads multi-line user input from stdin (terminated by a blank line),
    streams the model reply token-by-token, and keeps a chat history unless
    ``--mode generate`` is given, in which case each turn is independent.
    """
    args = parse_args()
    # Initialize the HFTransformer-based Language Model (llm)
    model = HFTransformer(
        path=args.path,
        meta_template=META,
        max_new_tokens=1024,
        top_p=0.8,
        top_k=None,
        temperature=0.1,
        repetition_penalty=1.0,
        stop_words=['<|im_end|>'])

    def input_prompt():
        # Collect lines until an empty line (double Enter) is entered.
        print('\ndouble enter to end input >>> ', end='', flush=True)
        sentinel = ''  # ends when this string is seen
        return '\n'.join(iter(input, sentinel))

    history = []
    while True:
        try:
            prompt = input_prompt()
        except UnicodeDecodeError:
            # Skip undecodable terminal input instead of crashing.
            print('UnicodeDecodeError')
            continue
        if prompt == 'exit':
            # NOTE(review): exit(0) relies on the site builtin; sys.exit(0)
            # or a plain `break` would be more robust.
            exit(0)
        history.append(dict(role='user', content=prompt))
        if args.mode == 'generate':
            # Stateless mode: drop prior turns, keep only the current prompt.
            history = [dict(role='user', content=prompt)]
        print('\nInternLm2:', end='')
        current_length = 0
        # Stream the reply; print only the newly generated suffix each step.
        for status, response, _ in model.stream_chat(history):
            print(response[current_length:], end='', flush=True)
            current_length = len(response)
        # NOTE(review): `response` is unbound if stream_chat yields nothing
        # — presumably it always yields at least once; confirm upstream.
        history.append(dict(role='assistant', content=response))
        print('')


if __name__ == '__main__':
    main()
examples/multi_agents_api_web_demo.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import asyncio
3
+ import json
4
+ import re
5
+ import requests
6
+ import streamlit as st
7
+
8
+ from lagent.agents import Agent
9
+ from lagent.prompts.parsers import PluginParser
10
+ from lagent.agents.stream import PLUGIN_CN, get_plugin_prompt
11
+ from lagent.schema import AgentMessage
12
+ from lagent.actions import ArxivSearch
13
+ from lagent.hooks import Hook
14
+ from lagent.llms import GPTAPI
15
+
16
+ YOUR_TOKEN_HERE = os.getenv("token")
17
+ if not YOUR_TOKEN_HERE:
18
+ raise EnvironmentError("未找到环境变量 'token',请设置后再运行程序。")
19
+
20
# Hook that rewrites selected messages before an agent consumes them.
class PrefixedMessageHook(Hook):
    """Prepend a fixed prefix to messages coming from specific senders."""

    def __init__(self, prefix, senders=None):
        """Initialize the hook.

        :param prefix: text prepended to each matching message
        :param senders: sender names whose messages are rewritten;
            defaults to an empty list (no message is modified)
        """
        self.prefix = prefix
        self.senders = senders or []

    def before_agent(self, agent, messages, session_id):
        """Mutate matching messages in place before the agent runs.

        :param agent: the agent about to process ``messages``
        :param messages: incoming message objects (mutated in place)
        :param session_id: current session identifier (unused here)
        """
        for msg in messages:
            if msg.sender not in self.senders:
                continue
            msg.content = self.prefix + msg.content
41
+
42
class AsyncBlogger:
    """Two-agent blog pipeline: a writer drafts, a critic reviews, the writer revises.

    The critic's reply is parsed for suggestions and English search keywords;
    the keywords drive an arXiv lookup whose results are handed back to the
    writer for the final revision.
    """

    def __init__(self, model_type, api_base, writer_prompt, critic_prompt, critic_prefix='', max_turn=2):
        """Build the shared LLM client and the writer/critic agents.

        :param model_type: model identifier passed to GPTAPI
        :param api_base: base URL of the chat-completions endpoint
        :param writer_prompt: system prompt for the writer agent
        :param critic_prompt: system prompt for the critic agent
        :param critic_prefix: prefix prepended to writer messages seen by the critic
        :param max_turn: maximum writer/critic rounds (stored; not used by forward)
        """
        self.model_type = model_type
        self.api_base = api_base
        self.llm = GPTAPI(
            model_type=model_type,
            api_base=api_base,
            key=YOUR_TOKEN_HERE,
            max_new_tokens=4096,
        )
        self.plugins = [dict(type='lagent.actions.ArxivSearch')]
        self.writer = Agent(
            self.llm,
            writer_prompt,
            name='写作者',
            output_format=dict(
                type=PluginParser,
                template=PLUGIN_CN,
                prompt=get_plugin_prompt(self.plugins)
            )
        )
        self.critic = Agent(
            self.llm,
            critic_prompt,
            name='批评者',
            hooks=[PrefixedMessageHook(critic_prefix, ['写作者'])]
        )
        self.max_turn = max_turn

    async def forward(self, message: AgentMessage, update_placeholder):
        """Run the three-step draft / critique / revision flow.

        :param message: initial user message carrying the blog topic
        :param update_placeholder: Streamlit placeholder receiving progress output
        :return: final revised message from the writer
        """
        step1_placeholder = update_placeholder.container()
        step2_placeholder = update_placeholder.container()
        step3_placeholder = update_placeholder.container()

        # Step 1: initial draft from the writer.
        step1_placeholder.markdown("**Step 1: 生成初始内容...**")
        message = self.writer(message)
        if message.content:
            step1_placeholder.markdown(f"**生成的初始内容**:\n\n{message.content}")
        else:
            step1_placeholder.markdown("**生成的初始内容为空,请检查生成逻辑。**")

        # Step 2: critic feedback plus arXiv references.
        step2_placeholder.markdown("**Step 2: 批评者正在提供反馈和文献推荐...**")
        message = self.critic(message)
        # BUGFIX: bind defaults up front. Previously these names were only
        # assigned inside the `if` branch, so an empty critic reply made the
        # improvement prompt below raise NameError.
        feedback = "未提供批评建议"
        arxiv_results = "未提供参考文献"
        if message.content:
            # Parse the critic's structured reply (suggestions + keywords).
            suggestions = re.search(r"1\. 批评建议:\n(.*?)2\. 推荐的关键词:", message.content, re.S)
            keywords = re.search(r"2\. 推荐的关键词:\n- (.*)", message.content)
            feedback = suggestions.group(1).strip() if suggestions else "未提供批评建议"
            keywords = keywords.group(1).strip() if keywords else "未提供关键词"

            # Query arXiv with the recommended keywords.
            arxiv_search = ArxivSearch()
            arxiv_results = arxiv_search.get_arxiv_article_information(keywords)

            # Show the critique together with the recommended papers.
            message.content = f"**批评建议**:\n{feedback}\n\n**推荐的文献**:\n{arxiv_results}"
            step2_placeholder.markdown(f"**批评和文献推荐**:\n\n{message.content}")
        else:
            step2_placeholder.markdown("**批评内容为空,请检查批评逻辑。**")

        # Step 3: writer revises the draft using the critique.
        step3_placeholder.markdown("**Step 3: 根据反馈改进内容...**")
        improvement_prompt = AgentMessage(
            sender="critic",
            content=(
                f"根据以下批评建议和推荐文献对内容进行改进:\n\n"
                f"批评建议:\n{feedback}\n\n"
                f"推荐文献:\n{arxiv_results}\n\n"
                # BUGFIX: repaired mojibake "优化��始内容" -> "优化初始内容".
                f"请优化初始内容,使其更加清晰、丰富,并符合专业水准。"
            ),
        )
        message = self.writer(improvement_prompt)
        if message.content:
            step3_placeholder.markdown(f"**最终优化的博客内容**:\n\n{message.content}")
        else:
            step3_placeholder.markdown("**最终优化的博客内容为空,请检查生成逻辑。**")

        return message
139
+
140
def setup_sidebar():
    """Render the sidebar widgets and return the chosen model and API base."""
    default_api_base = 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1/chat/completions'
    chosen_model = st.sidebar.text_input('模型名称:', value='internlm2.5-latest')
    chosen_api_base = st.sidebar.text_input('API Base 地址:', value=default_api_base)
    return chosen_model, chosen_api_base
148
+
149
def main():
    """Build the Streamlit UI and handle user interaction.

    Lazily (re)creates the AsyncBlogger when the selected model or API base
    changes, then runs the async generation pipeline on button click.
    """
    st.set_page_config(layout='wide', page_title='Lagent Web Demo', page_icon='🤖')
    st.title("多代理博客优化助手")

    model_type, api_base = setup_sidebar()
    topic = st.text_input('输入一个话题:', 'Self-Supervised Learning')
    generate_button = st.button('生成博客内容')

    # Rebuild the blogger only when it is missing or its config changed.
    # (Short-circuiting guarantees 'model_type'/'api_base' exist in
    # session_state whenever 'blogger' does.)
    if (
        'blogger' not in st.session_state or
        st.session_state['model_type'] != model_type or
        st.session_state['api_base'] != api_base
    ):
        st.session_state['blogger'] = AsyncBlogger(
            model_type=model_type,
            api_base=api_base,
            writer_prompt="你是一位优秀的AI内容写作者,请撰写一篇有吸引力且信息丰富的博客内容。",
            critic_prompt="""
                作为一位严谨的批评者,请给出建设性的批评和改进建议,并基于相关主题使用已有的工具推荐一些参考文献,推荐的关键词应该是英语形式,简洁且切题。
                请按照以下格式提供反馈:
                1. 批评建议:
                - (具体建议)
                2. 推荐的关键词:
                - (关键词1, 关键词2, ...)
            """,
            critic_prefix="请批评以下内容,并提供改进建议:\n\n"
        )
        st.session_state['model_type'] = model_type
        st.session_state['api_base'] = api_base

    if generate_button:
        update_placeholder = st.empty()

        async def run_async_blogger():
            # Seed the pipeline with the user's topic and await the result.
            message = AgentMessage(
                sender='user',
                content=f"请撰写一篇关于{topic}的博客文章,要求表达专业,生动有趣,并且易于理解。"
            )
            result = await st.session_state['blogger'].forward(message, update_placeholder)
            return result

        # A fresh event loop is created per click; Streamlit reruns the
        # script on each interaction, so the loop is not reused.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(run_async_blogger())

if __name__ == '__main__':
    main()
examples/run_agent_lmdeploy.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import time
3
+
4
+ from lagent.agents.aggregator import InternLMToolAggregator
5
+ from lagent.agents.stream import PLUGIN_CN, AgentForInternLM, MathCoder, get_plugin_prompt
6
+ from lagent.llms import INTERNLM2_META, LMDeployPipeline
7
+ from lagent.prompts.parsers import PluginParser
8
+
9
# Demo script: run MathCoder (code interpreter) and AgentForInternLM (plugin
# tool calling) on top of a local LMDeploy pipeline, then exercise the
# state_dict save/load round-trip.
model = LMDeployPipeline(
    path='internlm/internlm2_5-7b-chat',
    meta_template=INTERNLM2_META,
    tp=1,
    top_k=1,
    temperature=1.0,
    stop_words=['<|im_end|>', '<|action_end|>'],
    max_new_tokens=1024,
)

# ----------------------- interpreter -----------------------
print('-' * 80, 'interpreter', '-' * 80)
# One-shot example conversation used as few-shot context for the aggregator.
coder = MathCoder(
    llm=model,
    aggregator=InternLMToolAggregator(few_shot=[[
        {
            'role':
            'user',
            'content':
            "Mike paid $\\$1.25$ for a stamp three years ago. He was just offered double that amount for the stamp. Assuming the stamp's offer price doubles every three years, how many dollars will he be offered in 12 more years?"
        },
        {
            'role':
            'language',
            'content':
            "Let's break down the problem into smaller steps to understand how to approach it.\n\n**Step 1:** Determine the initial amount paid for the stamp, which is $1.25.\n\n**Step 2:** Understand the pattern of the offer price increase. The offer price doubles every three years.\n\n**Step 3:** Calculate the offer price in 12 more years.\n\nSince the offer price doubles every three years, in 12 years, the price will double 12 / 3 = 4 times.\n\nWe can represent the offer price after 12 more years as:\n\\[ \\text{Offer Price} = \\text{Initial Price} \\times 2^{(\\text{Number of Doublings})} \\]\n\nLet's calculate this using Python to get the exact figure.\n\n"
        },
        {
            'role': 'tool',
            'content':
            '```python\nfrom sympy import symbols, Eq, solve\n\n# Define the variables\ninitial_price = 1.25\ndoublings_in_12_years = 12 / 3\n\n# Calculate the offer price after 12 years\noffer_price_12_years = initial_price * 2**doublings_in_12_years\n\noffer_price_12_years\n```',
            'name': 'interpreter',
        },
        {
            'role': 'environment',
            'content': '20.0',
            'name': 'interpreter'
        },
        {
            'role':
            'language',
            'content':
            'After calculating the given formula, the offer price for the stamp in 12 more years is found to be $\\boxed{20.0}$ dollars.'
        },
    ]]))
query = (
    'Marie is thinking of a multiple of 63, while Jay is thinking of a factor '
    'of 63. They happen to be thinking of the same number. There are two '
    'possibilities for the number that each of them is thinking of, one '
    'positive and one negative. Find the product of these two numbers.')
res = coder(query, session_id=0)
# print(res.model_dump_json())
# print('-' * 120)
print(coder.get_steps(0))
# NOTE(review): json is already imported at the top of the file; this
# re-import is redundant (but harmless).
import json

# Dump the aggregated conversation as the LLM would see it.
print(
    json.dumps(
        coder.agent.aggregator.aggregate(coder.agent.memory, coder.agent.name,
                                         coder.agent.output_format),
        ensure_ascii=False,
        indent=4))

# Canned agent state used to test load_state_dict()/state_dict() round-trip.
state_dict = {
    'memory': [{
        'sender': 'user',
        'content':
        'Marie is thinking of a multiple of 63, while Jay is thinking of a factor of 63. They happen to be thinking of the same number. There are two possibilities for the number that each of them is thinking of, one positive and one negative. Find the product of these two numbers.',
        'formatted': None,
        'type': None,
        'receiver': None
    }, {
        'sender': 'Agent',
        'content':
        'The product of the two numbers, \\(63\\) and \\(-63\\), is \\(-3969\\). Therefore, the product of the two numbers that Marie and Jay are thinking of is \\(\\boxed{-3969}\\).',
        'formatted': {
            'tool_type': None,
            'thought':
            'The product of the two numbers, \\(63\\) and \\(-63\\), is \\(-3969\\). Therefore, the product of the two numbers that Marie and Jay are thinking of is \\(\\boxed{-3969}\\).',
            'action': None,
            'status': 0
        },
        'type': None,
        'receiver': None
    }],
    'agent.memory': [{
        'sender': 'user',
        'content':
        'Marie is thinking of a multiple of 63, while Jay is thinking of a factor of 63. They happen to be thinking of the same number. There are two possibilities for the number that each of them is thinking of, one positive and one negative. Find the product of these two numbers.',
        'formatted': None,
        'type': None,
        'receiver': None
    }, {
        'sender': 'Agent',
        'content':
        "To solve this problem, we first need to identify the factors of 63. Since Marie is thinking of a multiple of 63 and Jay is thinking of a factor of 63, and they are thinking of the same number, we need to find the factors of 63 and then determine the multiples of 63 that match these factors.\n\nLet's start by finding the factors of 63 using Python.<|action_start|><|interpreter|>\n```python\nfrom sympy import divisors\n\n# Find the factors of 63\nfactors_of_63 = divisors(63)\nfactors_of_63\n```",
        'formatted': {
            'tool_type': 'interpreter',
            'thought':
            "To solve this problem, we first need to identify the factors of 63. Since Marie is thinking of a multiple of 63 and Jay is thinking of a factor of 63, and they are thinking of the same number, we need to find the factors of 63 and then determine the multiples of 63 that match these factors.\n\nLet's start by finding the factors of 63 using Python.",
            'action':
            '```python\nfrom sympy import divisors\n\n# Find the factors of 63\nfactors_of_63 = divisors(63)\nfactors_of_63\n```',
            'status': 1
        },
        'type': None,
        'receiver': None
    }, {
        'sender': 'ActionExecutor',
        'content': '[1, 3, 7, 9, 21, 63]',
        'formatted': None,
        'type': None,
        'receiver': None
    }, {
        'sender': 'Agent',
        'content':
        "The factors of 63 are \\(1, 3, 7, 9, 21,\\) and \\(63\\). Since Marie is thinking of a multiple of 63 and Jay is thinking of a factor of 63, and they are thinking of the same number, the number they are thinking of must be one of the factors of 63.\n\nGiven that there are two possibilities for the number they are thinking of, one positive and one negative, we need to consider both \\(63\\) and \\(-63\\) as the number they might be thinking of.\n\nNow, let's calculate the product of these two numbers, \\(63\\) and \\(-63\\).<|action_start|><|interpreter|>\n```python\n# Calculate the product of 63 and -63\nproduct = 63 * (-63)\nproduct\n```",
        'formatted': {
            'tool_type': 'interpreter',
            'thought':
            "The factors of 63 are \\(1, 3, 7, 9, 21,\\) and \\(63\\). Since Marie is thinking of a multiple of 63 and Jay is thinking of a factor of 63, and they are thinking of the same number, the number they are thinking of must be one of the factors of 63.\n\nGiven that there are two possibilities for the number they are thinking of, one positive and one negative, we need to consider both \\(63\\) and \\(-63\\) as the number they might be thinking of.\n\nNow, let's calculate the product of these two numbers, \\(63\\) and \\(-63\\).",
            'action':
            '```python\n# Calculate the product of 63 and -63\nproduct = 63 * (-63)\nproduct\n```',
            'status': 1
        },
        'type': None,
        'receiver': None
    }, {
        'sender': 'ActionExecutor',
        'content': '-3969',
        'formatted': None,
        'type': None,
        'receiver': None
    }, {
        'sender': 'Agent',
        'content':
        'The product of the two numbers, \\(63\\) and \\(-63\\), is \\(-3969\\). Therefore, the product of the two numbers that Marie and Jay are thinking of is \\(\\boxed{-3969}\\).',
        'formatted': {
            'tool_type': None,
            'thought':
            'The product of the two numbers, \\(63\\) and \\(-63\\), is \\(-3969\\). Therefore, the product of the two numbers that Marie and Jay are thinking of is \\(\\boxed{-3969}\\).',
            'action': None,
            'status': 0
        },
        'type': None,
        'receiver': None
    }]
}
# Load the canned state and verify the round-trip reproduces it exactly.
coder.load_state_dict(state_dict)
print(coder.state_dict() == state_dict)
print(coder.state_dict())

# ----------------------- plugin -----------------------
print('-' * 80, 'plugin', '-' * 80)
plugins = [dict(type='lagent.actions.ArxivSearch')]
agent = AgentForInternLM(
    llm=model,
    plugins=plugins,
    output_format=dict(
        type=PluginParser,
        template=PLUGIN_CN,
        prompt=get_plugin_prompt(plugins)))

tic = time.time()
query = 'LLM智能体方向的最新论文有哪些?'
res = agent(query, session_id=0)
print(res.model_dump_json())
print('-' * 120)
print(agent.get_steps(0))
print(f'time elapsed: {time.time() - tic}')
examples/run_agent_services.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import time
3
+
4
+ from lagent.agents.stream import PLUGIN_CN, get_plugin_prompt
5
+ from lagent.distributed import AsyncHTTPAgentClient, AsyncHTTPAgentServer, HTTPAgentClient, HTTPAgentServer
6
+ from lagent.llms import INTERNLM2_META
7
+ from lagent.schema import AgentMessage
8
+ from lagent.utils import create_object
9
+
10
# Demo script: spin up HTTP agent servers (sync and async), query them via
# clients, and fan out concurrent plugin requests. Requires GPUs and free
# ports 8090/8091; runs as a script, not importable.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

# Plain synchronous agent server on GPU '1'.
server = HTTPAgentServer(
    '1',
    {
        'type': 'lagent.agents.AsyncAgent',
        'llm': {
            'type': 'lagent.llms.AsyncLMDeployPipeline',
            'path': 'internlm/internlm2_5-7b-chat',
            'meta_template': INTERNLM2_META,
        }
    },
    port=8090,
)
print(server.is_alive)
message = AgentMessage(sender='user', content='hello')
result = server(message)
print(result)
server.shutdown()

# math coder: async server wrapping a MathCoder with an IPython interpreter.
server = AsyncHTTPAgentServer(
    '1',
    {
        'type': 'lagent.agents.AsyncMathCoder',
        'llm': {
            'type': 'lagent.llms.AsyncLMDeployPipeline',
            'path': 'internlm/internlm2_5-7b-chat',
            'meta_template': INTERNLM2_META,
            'tp': 1,
            'top_k': 1,
            'temperature': 1.0,
            'stop_words': ['<|im_end|>', '<|action_end|>'],
            'max_new_tokens': 1024,
        },
        'interpreter': {
            'type': 'lagent.actions.AsyncIPythonInterpreter',
            'max_kernels': 100
        },
    },
    port=8091,
)
message = AgentMessage(
    sender='user',
    content=
    ('Marie is thinking of a multiple of 63, while Jay is thinking of a factor '
     'of 63. They happen to be thinking of the same number. There are two '
     'possibilities for the number that each of them is thinking of, one '
     'positive and one negative. Find the product of these two numbers.'))
# Calling the async server returns a coroutine; drive it on our loop.
result = server(message)
print(loop.run_until_complete(result))
print(server.state_dict())

# Async client talking to the same server/session.
client = AsyncHTTPAgentClient(port=8091)
result = client('hello', session_id=1)
print(loop.run_until_complete(result))
print(client.state_dict(1))

# Blocking client against the same endpoint.
client = HTTPAgentClient(port=8091)
print(client.state_dict(1))
print(client('introduce yourself', session_id=1))
print(client.state_dict(1))
server.shutdown()

# plugins: build the server from a config dict via create_object and fan out
# 50 concurrent identical arXiv queries, one session each.
plugins = [dict(type='lagent.actions.AsyncArxivSearch')]
server_cfg = dict(
    type='lagent.distributed.AsyncHTTPAgentServer',
    gpu_id='1',
    config={
        'type': 'lagent.agents.AsyncAgentForInternLM',
        'llm': {
            'type': 'lagent.llms.AsyncLMDeployPipeline',
            'path': 'internlm/internlm2_5-7b-chat',
            'meta_template': INTERNLM2_META,
            'tp': 1,
            'top_k': 1,
            'temperature': 1.0,
            'stop_words': ['<|im_end|>', '<|action_end|>'],
            'max_new_tokens': 1024,
        },
        'plugins': plugins,
        'output_format': {
            'type': 'lagent.prompts.parsers.PluginParser',
            'template': PLUGIN_CN,
            'prompt': get_plugin_prompt(plugins),
        }
    },
    port=8091,
)
server = create_object(server_cfg)
tic = time.time()
coros = [
    server(query, session_id=i)
    for i, query in enumerate(['LLM智能体方向的最新论文有哪些?'] * 50)
]
res = loop.run_until_complete(asyncio.gather(*coros))
print('-' * 120)
print(f'time elapsed: {time.time() - tic}')
server.shutdown()
examples/run_async_agent_lmdeploy.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import json
3
+ import time
4
+
5
+ from datasets import load_dataset
6
+
7
+ from lagent.agents.stream import PLUGIN_CN, AsyncAgentForInternLM, AsyncMathCoder, get_plugin_prompt
8
+ from lagent.llms import INTERNLM2_META
9
+ from lagent.llms.lmdeploy_wrapper import AsyncLMDeployPipeline
10
+ from lagent.prompts.parsers import PluginParser
11
+
12
# Demo script: batch-solve MATH problems with AsyncMathCoder and run a batch
# of plugin (arXiv) queries, all concurrently on one async LMDeploy pipeline.
# Set up a dedicated event loop for the async agents.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Initialize the async LMDeploy pipeline model.
model = AsyncLMDeployPipeline(
    path='internlm/internlm2_5-7b-chat',
    meta_template=INTERNLM2_META,
    model_name='internlm-chat',
    tp=1,
    top_k=1,
    temperature=1.0,
    stop_words=['<|im_end|>', '<|action_end|>'],
    max_new_tokens=1024,
)

# ----------------------- interpreter -----------------------
print('-' * 80, 'interpreter', '-' * 80)

# Every other problem from the MATH test split (2500 problems).
ds = load_dataset('lighteval/MATH', split='test')
problems = [item['problem'] for item in ds.select(range(0, 5000, 2))]

coder = AsyncMathCoder(
    llm=model,
    interpreter=dict(
        type='lagent.actions.AsyncIPythonInterpreter', max_kernels=300),
    max_turn=11)
tic = time.time()
# One session per problem; all solved concurrently.
coros = [coder(query, session_id=i) for i, query in enumerate(problems)]
res = loop.run_until_complete(asyncio.gather(*coros))
# print([r.model_dump_json() for r in res])
print('-' * 120)
print(f'time elapsed: {time.time() - tic}')

# Persist the per-session reasoning traces.
with open('./tmp_1.json', 'w') as f:
    json.dump([coder.get_steps(i) for i in range(len(res))],
              f,
              ensure_ascii=False,
              indent=4)

# ----------------------- plugin -----------------------
print('-' * 80, 'plugin', '-' * 80)
plugins = [dict(type='lagent.actions.AsyncArxivSearch')]
agent = AsyncAgentForInternLM(
    llm=model,
    plugins=plugins,
    output_format=dict(
        type=PluginParser,
        template=PLUGIN_CN,
        prompt=get_plugin_prompt(plugins)))

tic = time.time()
# 50 identical arXiv queries issued concurrently in separate sessions.
coros = [
    agent(query, session_id=i)
    for i, query in enumerate(['LLM智能体方向的最新论文有哪些?'] * 50)
]
res = loop.run_until_complete(asyncio.gather(*coros))
# print([r.model_dump_json() for r in res])
print('-' * 120)
print(f'time elapsed: {time.time() - tic}')
examples/run_async_agent_lmdeploy_server.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import json
3
+ import time
4
+
5
+ from datasets import load_dataset
6
+
7
+ from lagent.agents.stream import AsyncAgentForInternLM, AsyncMathCoder, get_plugin_prompt
8
+ from lagent.llms import INTERNLM2_META
9
+ from lagent.llms.lmdeploy_wrapper import AsyncLMDeployClient, AsyncLMDeployServer
10
+
11
# Demo script: start an AsyncLMDeployServer and stream a completion through
# an AsyncLMDeployClient. The interpreter and plugin sections are kept
# commented out as alternative experiments.
# Set up the event loop for the async client calls.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Initialize the model behind an OpenAI-style LMDeploy server.
model = AsyncLMDeployServer(
    path='internlm/internlm2_5-7b-chat',
    meta_template=INTERNLM2_META,
    model_name='internlm-chat',
    tp=1,
    top_k=1,
    temperature=1.0,
    stop_words=['<|im_end|>', '<|action_end|>'],
    max_new_tokens=1024,
)

# ----------------------- interpreter -----------------------
print('-' * 80, 'interpreter', '-' * 80)

# First 50 problems of the MATH test split (used by the streaming demo below).
ds = load_dataset('lighteval/MATH', split='test')
problems = [item['problem'] for item in ds.select(range(50))]


# coder = AsyncMathCoder(
#     llm=model,
#     interpreter=dict(type='AsyncIPythonInterpreter', max_kernels=250))
# tic = time.time()
# coros = [coder(query, session_id=i) for i, query in enumerate(problems)]
# res = loop.run_until_complete(asyncio.gather(*coros))
# # print([r.model_dump_json() for r in res])
# print('-' * 120)
# print(f'time elapsed: {time.time() - tic}')
# with open('./tmp_4.json', 'w') as f:
#     json.dump([coder.get_steps(i) for i in range(len(res))],
#               f,
#               ensure_ascii=False,
#               indent=4)

# ----------------------- streaming chat -----------------------
async def streaming(llm, problem):
    """Stream a single chat completion for `problem` and print each chunk."""
    async for out in llm.stream_chat([{'role': 'user', 'content': problem}]):
        print(out)


tic = time.time()
# Client assumes an LMDeploy api_server already listening on 23333.
client = AsyncLMDeployClient(
    url='http://127.0.0.1:23333',
    meta_template=INTERNLM2_META,
    model_name='internlm2_5-7b-chat',
    top_k=1,
    temperature=1.0,
    stop_words=['<|im_end|>', '<|action_end|>'],
    max_new_tokens=1024,
)
# loop.run_until_complete(streaming(model, problems[0]))
loop.run_until_complete(streaming(client, problems[0]))
print(time.time() - tic)

# ----------------------- plugin -----------------------
# print('-' * 80, 'plugin', '-' * 80)
# plugins = [dict(type='AsyncArxivSearch')]
# agent = AsyncAgentForInternLM(
#     llm=model,
#     plugins=plugins,
#     aggregator=dict(
#         type='InternLMToolAggregator',
#         plugin_prompt=get_plugin_prompt(plugins)))

# tic = time.time()
# coros = [
#     agent(query, session_id=i)
#     for i, query in enumerate(['LLM智能体方向的最新论文有哪些?'] * 50)
# ]
# res = loop.run_until_complete(asyncio.gather(*coros))
# # print([r.model_dump_json() for r in res])
# print('-' * 120)
# print(f'time elapsed: {time.time() - tic}')
examples/run_async_agent_openai.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import json
3
+ import time
4
+
5
+ from datasets import load_dataset
6
+
7
+ from lagent.agents import AsyncMathCoder
8
+ from lagent.agents.aggregator import InternLMToolAggregator
9
+ from lagent.llms import AsyncGPTAPI
10
+ from lagent.prompts.parsers import ToolParser
11
+
12
# Demo script: solve 30 MATH training problems concurrently with an
# OpenAI-backed AsyncMathCoder and dump the reasoning traces to JSON.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

# System prompt: interleave natural-language reasoning with Python blocks
# wrapped in {begin}/{end}, box the final answer, finish with "[END]".
interpreter_prompt = (
    'Below is a math problem. Please solve it step by step with the assistance of Python programming. Consider using Sympy or Numpy library '
    'to facilitate your derivation, calculation and equation solving. Utilize the "pi" symbol and "Rational" from Sympy '
    'for $$\pi$$ and fractions, and simplify all fractions and square roots without converting them to decimal values. '
    'Please encapsulate each generated Jupyter Python code block with tags "{begin}" and "{end}". Conclude the '
    r'final answer when observations are sufficient and encapsulate the numerical result with LaTeX syntax "\boxed{{}}" '
    'without any unit, and end your conclusion with the special token "[END]" to denote the completion of your response. '
    'Keep the following points in mind:\n'
    '- You must alternately use human and programming languages in the chain of thought;\n'
    '- The number of your reasoning steps should not exceed **three**, which means you may merge some intermediate steps when the original answer is tedious.'
)

# LLM config; the empty `key` must be filled with a real API key to run.
async_llm = dict(
    type=AsyncGPTAPI,
    model='gpt-4o-2024-05-13',
    retry=50,
    key='',
    max_new_tokens=2048,
    stop_words=['</python'],
    proxies=dict(),
)
async_agent = AsyncMathCoder(
    llm=async_llm,
    # Code blocks are delimited with <python>...</python> tags.
    output_format=ToolParser(
        tool_type='interpreter',
        template=interpreter_prompt,
        begin='<python>\n',
        end='\n</python>'),
    # Interpreter output is echoed back as a system message in <output> tags.
    aggregator=InternLMToolAggregator(
        environment_role='system',
        environment_begin='<output>\n',
        environment_end='\n</output>'),
    # The model signals completion with the literal token "[END]".
    finish_condition=lambda m: '[END]' in m.content,
)

ds = load_dataset('lighteval/MATH', split='train')
problems = [item['problem'] for item in ds.select(range(30))]

tic = time.time()
# One session per problem, all solved concurrently.
coros = [async_agent(q, session_id=i) for i, q in enumerate(problems)]
res = loop.run_until_complete(asyncio.gather(*coros))
print(time.time() - tic)
with open('tmp_6.json', 'w') as f:
    json.dump([async_agent.get_steps(i) for i in range(len(problems))],
              f,
              ensure_ascii=False,
              indent=4)
examples/run_async_agent_vllm.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import json
3
+ import time
4
+
5
+ from datasets import load_dataset
6
+
7
+ from lagent.agents.stream import AsyncAgentForInternLM, AsyncMathCoder
8
+ from lagent.llms import INTERNLM2_META
9
+ from lagent.llms.vllm_wrapper import AsyncVllmModel
10
+ from lagent.prompts.parsers import ToolParser
11
+
12
# Demo script: batch-solve 50 MATH test problems with AsyncMathCoder on an
# async vLLM backend (Qwen2-7B-Instruct) and dump the traces to JSON.
# Set up the event loop for the async agents.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Initialize the async vLLM model.
model = AsyncVllmModel(
    path='Qwen/Qwen2-7B-Instruct',
    meta_template=INTERNLM2_META,
    tp=1,
    top_k=1,
    temperature=1.0,
    stop_words=['<|im_end|>', '\n```\n'],
    max_new_tokens=1024,
)

# ----------------------- interpreter -----------------------
print('-' * 80, 'interpreter', '-' * 80)

ds = load_dataset('lighteval/MATH', split='test')
problems = [item['problem'] for item in ds.select(range(50))]

coder = AsyncMathCoder(
    llm=model,
    interpreter=dict(
        type='lagent.actions.AsyncIPythonInterpreter', max_kernels=200),
    # Code blocks are fenced with markdown ```python fences; the closing
    # fence also serves as a stop word above.
    output_format=ToolParser(
        'interpreter',
        template=
        ('Integrate step-by-step reasoning and Python code to solve math problems '
         'using the following guidelines:\n'
         '- Analyze the question and write jupyter code to solve the problem;\n'
         r"- Present the final result in LaTeX using a '\boxed{{}}' without any "
         'units. \n'),
        begin='\n```python\n',
        end='\n```\n'))

tic = time.time()
# One session per problem, all solved concurrently.
coros = [coder(query, session_id=i) for i, query in enumerate(problems)]
res = loop.run_until_complete(asyncio.gather(*coros))
# print([r.model_dump_json() for r in res])
print('-' * 120)
print(f'time elapsed: {time.time() - tic}')

with open('./tmp_3.json', 'w') as f:
    json.dump([coder.get_steps(i) for i in range(len(res))],
              f,
              ensure_ascii=False,
              indent=4)
examples/run_ray_async_agent_lmdeploy.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Example: distribute async MathCoder agents across two Ray actors.

Splits 5000 MATH test problems evenly between two single-GPU
``AsyncAgentRayActor`` instances, gathers every coroutine concurrently, then
collects and dumps all agent trajectories to ``tmp_1.json``.
"""
import asyncio
import json
import time

import ray
from datasets import load_dataset

from lagent.distributed.ray_serve import AsyncAgentRayActor
from lagent.llms import INTERNLM2_META
from lagent.llms.lmdeploy_wrapper import AsyncLMDeployPipeline

ray.init()

# Set up the event loop used to drive both actors.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

# Model config passed to each actor (instantiated lazily inside the actor).
model = dict(
    type=AsyncLMDeployPipeline,
    path='internlm/internlm2_5-7b-chat',
    meta_template=INTERNLM2_META,
    tp=1,
    top_k=1,
    temperature=1.0,
    stop_words=['<|im_end|>', '<|action_end|>'],
    max_new_tokens=1024,
)

# ----------------------- interpreter -----------------------
print('-' * 80, 'interpreter', '-' * 80)
ds = load_dataset('lighteval/MATH', split='test')
problems = [item['problem'] for item in ds.select(range(5000))]

coder = dict(
    type='lagent.agents.stream.AsyncMathCoder',
    llm=model,
    interpreter=dict(type='AsyncIPythonInterpreter', max_kernels=300),
)
tic = time.time()

# Shard the workload evenly between two single-GPU actors; session ids are
# local to each actor, hence both enumerations restart from 0.
split = len(problems) // 2
actor1 = AsyncAgentRayActor(coder.copy(), num_gpus=1)
actor2 = AsyncAgentRayActor(coder.copy(), num_gpus=1)
coros = [
    actor1(query, session_id=i)
    for i, query in enumerate(problems[:split])
]
coros += [
    actor2(query, session_id=i)
    for i, query in enumerate(problems[split:])
]
try:
    results = loop.run_until_complete(asyncio.gather(*coros))
finally:
    # Fix: the event loop was previously never closed.
    loop.close()

print('-' * 120)
print(f'time elapsed: {time.time() - tic}')

# Pull per-session trajectories back from each actor.
all_steps = ray.get(
    [actor1.agent_actor.get_steps.remote(i) for i in range(split)])
all_steps += ray.get([
    actor2.agent_actor.get_steps.remote(i)
    for i in range(len(problems) - split)
])

# Explicit utf-8 pairs with ensure_ascii=False for non-ASCII content.
with open('./tmp_1.json', 'w', encoding='utf-8') as f:
    json.dump(all_steps, f, ensure_ascii=False, indent=4)
lagent.egg-info/PKG-INFO ADDED
@@ -0,0 +1,600 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: lagent
3
+ Version: 0.5.0rc1
4
+ Summary: A lightweight framework for building LLM-based agents
5
+ Home-page: https://github.com/InternLM/lagent
6
+ License: Apache 2.0
7
+ Keywords: artificial general intelligence,agent,agi,llm
8
+ Description-Content-Type: text/markdown
9
+ License-File: LICENSE
10
+ Requires-Dist: aiohttp
11
+ Requires-Dist: arxiv
12
+ Requires-Dist: asyncache
13
+ Requires-Dist: asyncer
14
+ Requires-Dist: distro
15
+ Requires-Dist: duckduckgo_search==5.3.1b1
16
+ Requires-Dist: filelock
17
+ Requires-Dist: func_timeout
18
+ Requires-Dist: griffe<1.0
19
+ Requires-Dist: json5
20
+ Requires-Dist: jsonschema
21
+ Requires-Dist: jupyter==1.0.0
22
+ Requires-Dist: jupyter_client==8.6.2
23
+ Requires-Dist: jupyter_core==5.7.2
24
+ Requires-Dist: pydantic==2.6.4
25
+ Requires-Dist: requests
26
+ Requires-Dist: termcolor
27
+ Requires-Dist: tiktoken
28
+ Requires-Dist: timeout-decorator
29
+ Requires-Dist: typing-extensions
30
+ Provides-Extra: all
31
+ Requires-Dist: google-search-results; extra == "all"
32
+ Requires-Dist: lmdeploy>=0.2.5; extra == "all"
33
+ Requires-Dist: pillow; extra == "all"
34
+ Requires-Dist: python-pptx; extra == "all"
35
+ Requires-Dist: timeout_decorator; extra == "all"
36
+ Requires-Dist: torch; extra == "all"
37
+ Requires-Dist: transformers<=4.40,>=4.34; extra == "all"
38
+ Requires-Dist: vllm>=0.3.3; extra == "all"
39
+ Requires-Dist: aiohttp; extra == "all"
40
+ Requires-Dist: arxiv; extra == "all"
41
+ Requires-Dist: asyncache; extra == "all"
42
+ Requires-Dist: asyncer; extra == "all"
43
+ Requires-Dist: distro; extra == "all"
44
+ Requires-Dist: duckduckgo_search==5.3.1b1; extra == "all"
45
+ Requires-Dist: filelock; extra == "all"
46
+ Requires-Dist: func_timeout; extra == "all"
47
+ Requires-Dist: griffe<1.0; extra == "all"
48
+ Requires-Dist: json5; extra == "all"
49
+ Requires-Dist: jsonschema; extra == "all"
50
+ Requires-Dist: jupyter==1.0.0; extra == "all"
51
+ Requires-Dist: jupyter_client==8.6.2; extra == "all"
52
+ Requires-Dist: jupyter_core==5.7.2; extra == "all"
53
+ Requires-Dist: pydantic==2.6.4; extra == "all"
54
+ Requires-Dist: requests; extra == "all"
55
+ Requires-Dist: termcolor; extra == "all"
56
+ Requires-Dist: tiktoken; extra == "all"
57
+ Requires-Dist: timeout-decorator; extra == "all"
58
+ Requires-Dist: typing-extensions; extra == "all"
59
+ Provides-Extra: optional
60
+ Requires-Dist: google-search-results; extra == "optional"
61
+ Requires-Dist: lmdeploy>=0.2.5; extra == "optional"
62
+ Requires-Dist: pillow; extra == "optional"
63
+ Requires-Dist: python-pptx; extra == "optional"
64
+ Requires-Dist: timeout_decorator; extra == "optional"
65
+ Requires-Dist: torch; extra == "optional"
66
+ Requires-Dist: transformers<=4.40,>=4.34; extra == "optional"
67
+ Requires-Dist: vllm>=0.3.3; extra == "optional"
68
+
69
+ <div id="top"></div>
70
+ <div align="center">
71
+ <img src="docs/imgs/lagent_logo.png" width="450"/>
72
+
73
+ [![docs](https://img.shields.io/badge/docs-latest-blue)](https://lagent.readthedocs.io/en/latest/)
74
+ [![PyPI](https://img.shields.io/pypi/v/lagent)](https://pypi.org/project/lagent)
75
+ [![license](https://img.shields.io/github/license/InternLM/lagent.svg)](https://github.com/InternLM/lagent/tree/main/LICENSE)
76
+ [![issue resolution](https://img.shields.io/github/issues-closed-raw/InternLM/lagent)](https://github.com/InternLM/lagent/issues)
77
+ [![open issues](https://img.shields.io/github/issues-raw/InternLM/lagent)](https://github.com/InternLM/lagent/issues)
78
+ ![Visitors](https://api.visitorbadge.io/api/visitors?path=InternLM%2Flagent%20&countColor=%23263759&style=flat)
79
+ ![GitHub forks](https://img.shields.io/github/forks/InternLM/lagent)
80
+ ![GitHub Repo stars](https://img.shields.io/github/stars/InternLM/lagent)
81
+ ![GitHub contributors](https://img.shields.io/github/contributors/InternLM/lagent)
82
+
83
+ </div>
84
+
85
+ <p align="center">
86
+ 👋 join us on <a href="https://twitter.com/intern_lm" target="_blank">𝕏 (Twitter)</a>, <a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> and <a href="https://r.vansin.top/?r=internwx" target="_blank">WeChat</a>
87
+ </p>
88
+
89
+ ## Installation
90
+
91
+ Install from source:
92
+
93
+ ```bash
94
+ git clone https://github.com/InternLM/lagent.git
95
+ cd lagent
96
+ pip install -e .
97
+ ```
98
+
99
+ ## Usage
100
+
101
+ Lagent is inspired by the design philosophy of PyTorch. We expect that the analogy of neural network layers will make the workflow clearer and more intuitive, so users only need to focus on creating layers and defining message passing between them in a Pythonic way. This is a simple tutorial to get you quickly started with building multi-agent applications.
102
+
103
+ ### Models as Agents
104
+
105
+ Agents use `AgentMessage` for communication.
106
+
107
+ ```python
108
+ from typing import Dict, List
109
+ from lagent.agents import Agent
110
+ from lagent.schema import AgentMessage
111
+ from lagent.llms import VllmModel, INTERNLM2_META
112
+
113
+ llm = VllmModel(
114
+ path='Qwen/Qwen2-7B-Instruct',
115
+ meta_template=INTERNLM2_META,
116
+ tp=1,
117
+ top_k=1,
118
+ temperature=1.0,
119
+ stop_words=['<|im_end|>'],
120
+ max_new_tokens=1024,
121
+ )
122
+ system_prompt = '你的回答只能从“典”、“孝”、“急”三个字中选一个。'
123
+ agent = Agent(llm, system_prompt)
124
+
125
+ user_msg = AgentMessage(sender='user', content='今天天气情况')
126
+ bot_msg = agent(user_msg)
127
+ print(bot_msg)
128
+ ```
129
+
130
+ ```
131
+ content='急' sender='Agent' formatted=None extra_info=None type=None receiver=None stream_state=<AgentStatusCode.END: 0>
132
+ ```
133
+
134
+ ### Memory as State
135
+
136
+ Both input and output messages will be added to the memory of `Agent` in each forward pass. This is performed in `__call__` rather than `forward`. See the following pseudo code
137
+
138
+ ```python
139
+ def __call__(self, *message):
140
+ message = pre_hooks(message)
141
+ add_memory(message)
142
+ message = self.forward(*message)
143
+ add_memory(message)
144
+ message = post_hooks(message)
145
+ return message
146
+ ```
147
+
148
+ Inspect the memory in two ways
149
+
150
+ ```python
151
+ memory: List[AgentMessage] = agent.memory.get_memory()
152
+ print(memory)
153
+ print('-' * 120)
154
+ dumped_memory: Dict[str, List[dict]] = agent.state_dict()
155
+ print(dumped_memory['memory'])
156
+ ```
157
+
158
+ ```
159
+ [AgentMessage(content='今天天气情况', sender='user', formatted=None, extra_info=None, type=None, receiver=None, stream_state=<AgentStatusCode.END: 0>), AgentMessage(content='急', sender='Agent', formatted=None, extra_info=None, type=None, receiver=None, stream_state=<AgentStatusCode.END: 0>)]
160
+ ------------------------------------------------------------------------------------------------------------------------
161
+ [{'content': '今天天气情况', 'sender': 'user', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': <AgentStatusCode.END: 0>}, {'content': '急', 'sender': 'Agent', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': <AgentStatusCode.END: 0>}]
162
+ ```
163
+
164
+ Clear the memory of this session (`session_id=0` by default):
165
+
166
+ ```python
167
+ agent.memory.reset()
168
+ ```
169
+
170
+ ### Custom Message Aggregation
171
+
172
+ `DefaultAggregator` is called under the hood to assemble and convert `AgentMessage` to OpenAI message format.
173
+
174
+ ```python
175
+ def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]:
176
+ formatted_messages = self.aggregator.aggregate(
177
+ self.memory.get(session_id),
178
+ self.name,
179
+ self.output_format,
180
+ self.template,
181
+ )
182
+ llm_response = self.llm.chat(formatted_messages, **kwargs)
183
+ ...
184
+ ```
185
+
186
+ Implement a simple aggregator that can receive few-shots
187
+
188
+ ```python
189
+ from typing import List, Union
190
+ from lagent.memory import Memory
191
+ from lagent.prompts import StrParser
192
+ from lagent.agents.aggregator import DefaultAggregator
193
+
194
+ class FewshotAggregator(DefaultAggregator):
195
+ def __init__(self, few_shot: List[dict] = None):
196
+ self.few_shot = few_shot or []
197
+
198
+ def aggregate(self,
199
+ messages: Memory,
200
+ name: str,
201
+ parser: StrParser = None,
202
+ system_instruction: Union[str, dict, List[dict]] = None) -> List[dict]:
203
+ _message = []
204
+ if system_instruction:
205
+ _message.extend(
206
+ self.aggregate_system_intruction(system_instruction))
207
+ _message.extend(self.few_shot)
208
+ messages = messages.get_memory()
209
+ for message in messages:
210
+ if message.sender == name:
211
+ _message.append(
212
+ dict(role='assistant', content=str(message.content)))
213
+ else:
214
+ user_message = message.content
215
+ if len(_message) > 0 and _message[-1]['role'] == 'user':
216
+ _message[-1]['content'] += user_message
217
+ else:
218
+ _message.append(dict(role='user', content=user_message))
219
+ return _message
220
+
221
+ agent = Agent(
222
+ llm,
223
+ aggregator=FewshotAggregator(
224
+ [
225
+ {"role": "user", "content": "今天天气"},
226
+ {"role": "assistant", "content": "【晴】"},
227
+ ]
228
+ )
229
+ )
230
+ user_msg = AgentMessage(sender='user', content='昨天天气')
231
+ bot_msg = agent(user_msg)
232
+ print(bot_msg)
233
+ ```
234
+
235
+ ```
236
+ content='【多云转晴,夜间有轻微降温】' sender='Agent' formatted=None extra_info=None type=None receiver=None stream_state=<AgentStatusCode.END: 0>
237
+ ```
238
+
239
+ ### Flexible Response Formatting
240
+
241
+ In `AgentMessage`, `formatted` is reserved to store information parsed by `output_format` from the model output.
242
+
243
+ ```python
244
+ def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]:
245
+ ...
246
+ llm_response = self.llm.chat(formatted_messages, **kwargs)
247
+ if self.output_format:
248
+ formatted_messages = self.output_format.parse_response(llm_response)
249
+ return AgentMessage(
250
+ sender=self.name,
251
+ content=llm_response,
252
+ formatted=formatted_messages,
253
+ )
254
+ ...
255
+ ```
256
+
257
+ Use a tool parser as follows
258
+
259
+ ````python
260
+ from lagent.prompts.parsers import ToolParser
261
+
262
+ system_prompt = "逐步分析并编写Python代码解决以下问题。"
263
+ parser = ToolParser(tool_type='code interpreter', begin='```python\n', end='\n```\n')
264
+ llm.gen_params['stop_words'].append('\n```\n')
265
+ agent = Agent(llm, system_prompt, output_format=parser)
266
+
267
+ user_msg = AgentMessage(
268
+ sender='user',
269
+ content='Marie is thinking of a multiple of 63, while Jay is thinking of a '
270
+ 'factor of 63. They happen to be thinking of the same number. There are '
271
+ 'two possibilities for the number that each of them is thinking of, one '
272
+ 'positive and one negative. Find the product of these two numbers.')
273
+ bot_msg = agent(user_msg)
274
+ print(bot_msg.model_dump_json(indent=4))
275
+ ````
276
+
277
+ ````
278
+ {
279
+ "content": "首先,我们需要找出63的所有正因数和负因数。63的正因数可以通过分解63的质因数来找出,即\\(63 = 3^2 \\times 7\\)。因此,63的正因数包括1, 3, 7, 9, 21, 和 63。对于负因数,我们只需将上述正因数乘以-1。\n\n接下来,我们需要找出与63的正因数相乘的结果为63的数,以及与63的负因数相乘的结果为63的数。这可以通过将63除以每个正因数和负因数来实现。\n\n最后,我们将找到的两个数相乘得到最终答案。\n\n下面是Python代码实现:\n\n```python\ndef find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)",
280
+ "sender": "Agent",
281
+ "formatted": {
282
+ "tool_type": "code interpreter",
283
+ "thought": "首先,我们需要找出63的所有正因数和负因数。63的正因数可以通过分解63的质因数来找出,即\\(63 = 3^2 \\times 7\\)。因此,63的正因数包括1, 3, 7, 9, 21, 和 63。对于负因数,我们只需将上述正因数乘以-1。\n\n接下来,我们需要找出与63的正因数相乘的结果为63的数,以及与63的负因数相乘的结果为63的数。这可以通过将63除以每个正因数和负因数来实现。\n\n最后,我们将找到的两个数相乘得到最终答案。\n\n下面是Python代码实现:\n\n",
284
+ "action": "def find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)",
285
+ "status": 1
286
+ },
287
+ "extra_info": null,
288
+ "type": null,
289
+ "receiver": null,
290
+ "stream_state": 0
291
+ }
292
+ ````
293
+
294
+ ### Consistency of Tool Calling
295
+
296
+ `ActionExecutor` uses the same communication data structure as `Agent`, but requires the content of input `AgentMessage` to be a dict containing:
297
+
298
+ - `name`: tool name, e.g. `'IPythonInterpreter'`, `'WebBrowser.search'`.
299
+ - `parameters`: keyword arguments of the tool API, e.g. `{'command': 'import math;math.sqrt(2)'}`, `{'query': ['recent progress in AI']}`.
300
+
301
+ You can register custom hooks for message conversion.
302
+
303
+ ```python
304
+ from lagent.hooks import Hook
305
+ from lagent.schema import ActionReturn, ActionStatusCode, AgentMessage
306
+ from lagent.actions import ActionExecutor, IPythonInteractive
307
+
308
+ class CodeProcessor(Hook):
309
+ def before_action(self, executor, message, session_id):
310
+ message = message.copy(deep=True)
311
+ message.content = dict(
312
+ name='IPythonInteractive', parameters={'command': message.formatted['action']}
313
+ )
314
+ return message
315
+
316
+ def after_action(self, executor, message, session_id):
317
+ action_return = message.content
318
+ if isinstance(action_return, ActionReturn):
319
+ if action_return.state == ActionStatusCode.SUCCESS:
320
+ response = action_return.format_result()
321
+ else:
322
+ response = action_return.errmsg
323
+ else:
324
+ response = action_return
325
+ message.content = response
326
+ return message
327
+
328
+ executor = ActionExecutor(actions=[IPythonInteractive()], hooks=[CodeProcessor()])
329
+ bot_msg = AgentMessage(
330
+ sender='Agent',
331
+ content='首先,我们需要...',
332
+ formatted={
333
+ 'tool_type': 'code interpreter',
334
+ 'thought': '首先,我们需要...',
335
+ 'action': 'def find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)',
336
+ 'status': 1
337
+ })
338
+ executor_msg = executor(bot_msg)
339
+ print(executor_msg)
340
+ ```
341
+
342
+ ```
343
+ content='3969.0' sender='ActionExecutor' formatted=None extra_info=None type=None receiver=None stream_state=<AgentStatusCode.END: 0>
344
+ ```
345
+
346
+ **For convenience, Lagent provides `InternLMActionProcessor` which is adapted to messages formatted by `ToolParser` as mentioned above.**
347
+
348
+ ### Dual Interfaces
349
+
350
+ Lagent adopts dual interface design, where almost every component(LLMs, actions, action executors...) has the corresponding asynchronous variant by prefixing its identifier with 'Async'. It is recommended to use synchronous agents for debugging and asynchronous ones for large-scale inference to make the most of idle CPU and GPU resources.
351
+
352
+ However, ensure the internal consistency of agents, i.e. asynchronous agents should be equipped with asynchronous LLMs and asynchronous action executors that drive asynchronous tools.
353
+
354
+ ```python
355
+ from lagent.llms import VllmModel, AsyncVllmModel, LMDeployPipeline, AsyncLMDeployPipeline
356
+ from lagent.actions import ActionExecutor, AsyncActionExecutor, WebBrowser, AsyncWebBrowser
357
+ from lagent.agents import Agent, AsyncAgent, AgentForInternLM, AsyncAgentForInternLM
358
+ ```
359
+
360
+ ______________________________________________________________________
361
+
362
+ ## Practice
363
+
364
+ - **Try to implement `forward` instead of `__call__` of subclasses unless necessary.**
365
+ - **Always include the `session_id` argument explicitly, which is designed for isolation of memory, LLM requests and tool invocation(e.g. maintain multiple independent IPython environments) in concurrency.**
366
+
367
+ ### Single Agent
368
+
369
+ Math agents that solve problems by programming
370
+
371
+ ````python
372
+ from lagent.agents.aggregator import InternLMToolAggregator
373
+
374
+ class Coder(Agent):
375
+ def __init__(self, model_path, system_prompt, max_turn=3):
376
+ super().__init__()
377
+ llm = VllmModel(
378
+ path=model_path,
379
+ meta_template=INTERNLM2_META,
380
+ tp=1,
381
+ top_k=1,
382
+ temperature=1.0,
383
+ stop_words=['\n```\n', '<|im_end|>'],
384
+ max_new_tokens=1024,
385
+ )
386
+ self.agent = Agent(
387
+ llm,
388
+ system_prompt,
389
+ output_format=ToolParser(
390
+ tool_type='code interpreter', begin='```python\n', end='\n```\n'
391
+ ),
392
+ # `InternLMToolAggregator` is adapted to `ToolParser` for aggregating
393
+ # messages with tool invocations and execution results
394
+ aggregator=InternLMToolAggregator(),
395
+ )
396
+ self.executor = ActionExecutor([IPythonInteractive()], hooks=[CodeProcessor()])
397
+ self.max_turn = max_turn
398
+
399
+ def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
400
+ for _ in range(self.max_turn):
401
+ message = self.agent(message, session_id=session_id)
402
+ if message.formatted['tool_type'] is None:
403
+ return message
404
+ message = self.executor(message, session_id=session_id)
405
+ return message
406
+
407
+ coder = Coder('Qwen/Qwen2-7B-Instruct', 'Solve the problem step by step with assistance of Python code')
408
+ query = AgentMessage(
409
+ sender='user',
410
+ content='Find the projection of $\\mathbf{a}$ onto $\\mathbf{b} = '
411
+ '\\begin{pmatrix} 1 \\\\ -3 \\end{pmatrix}$ if $\\mathbf{a} \\cdot \\mathbf{b} = 2.$'
412
+ )
413
+ answer = coder(query)
414
+ print(answer.content)
415
+ print('-' * 120)
416
+ for msg in coder.state_dict()['agent.memory']:
417
+ print('*' * 80)
418
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
419
+ ````
420
+
421
+ ### Multiple Agents
422
+
423
+ Asynchronous blogging agents that improve writing quality by self-refinement ([original AutoGen example](https://microsoft.github.io/autogen/0.2/docs/topics/prompting-and-reasoning/reflection/))
424
+
425
+ ```python
426
+ import asyncio
427
+ import os
428
+ from lagent.llms import AsyncGPTAPI
429
+ from lagent.agents import AsyncAgent
430
+ os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY'
431
+
432
+ class PrefixedMessageHook(Hook):
433
+ def __init__(self, prefix: str, senders: list = None):
434
+ self.prefix = prefix
435
+ self.senders = senders or []
436
+
437
+ def before_agent(self, agent, messages, session_id):
438
+ for message in messages:
439
+ if message.sender in self.senders:
440
+ message.content = self.prefix + message.content
441
+
442
+ class AsyncBlogger(AsyncAgent):
443
+ def __init__(self, model_path, writer_prompt, critic_prompt, critic_prefix='', max_turn=3):
444
+ super().__init__()
445
+ llm = AsyncGPTAPI(model_type=model_path, retry=5, max_new_tokens=2048)
446
+ self.writer = AsyncAgent(llm, writer_prompt, name='writer')
447
+ self.critic = AsyncAgent(
448
+ llm, critic_prompt, name='critic', hooks=[PrefixedMessageHook(critic_prefix, ['writer'])]
449
+ )
450
+ self.max_turn = max_turn
451
+
452
+ async def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
453
+ for _ in range(self.max_turn):
454
+ message = await self.writer(message, session_id=session_id)
455
+ message = await self.critic(message, session_id=session_id)
456
+ return await self.writer(message, session_id=session_id)
457
+
458
+ blogger = AsyncBlogger(
459
+ 'gpt-4o-2024-05-13',
460
+ writer_prompt="You are a writing assistant tasked to write engaging blogpost. You try to generate the best blogpost possible for the user's request. "
461
+ "If the user provides critique, then respond with a revised version of your previous attempts",
462
+ critic_prompt="Generate critique and recommendations on the writing. Provide detailed recommendations, including requests for length, depth, style, etc..",
463
+ critic_prefix='Reflect and provide critique on the following writing. \n\n',
464
+ )
465
+ user_prompt = (
466
+ "Write an engaging blogpost on the recent updates in {topic}. "
467
+ "The blogpost should be engaging and understandable for general audience. "
468
+ "Should have more than 3 paragraphes but no longer than 1000 words.")
469
+ bot_msgs = asyncio.get_event_loop().run_until_complete(
470
+ asyncio.gather(
471
+ *[
472
+ blogger(AgentMessage(sender='user', content=user_prompt.format(topic=topic)), session_id=i)
473
+ for i, topic in enumerate(['AI', 'Biotechnology', 'New Energy', 'Video Games', 'Pop Music'])
474
+ ]
475
+ )
476
+ )
477
+ print(bot_msgs[0].content)
478
+ print('-' * 120)
479
+ for msg in blogger.state_dict(session_id=0)['writer.memory']:
480
+ print('*' * 80)
481
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
482
+ print('-' * 120)
483
+ for msg in blogger.state_dict(session_id=0)['critic.memory']:
484
+ print('*' * 80)
485
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
486
+ ```
487
+
488
+ A multi-agent workflow that performs information retrieval, data collection and chart plotting ([original LangGraph example](https://vijaykumarkartha.medium.com/multiple-ai-agents-creating-multi-agent-workflows-using-langgraph-and-langchain-0587406ec4e6))
489
+
490
+ <div align="center">
491
+ <img src="https://miro.medium.com/v2/resize:fit:1400/format:webp/1*ffzadZCKXJT7n4JaRVFvcQ.jpeg" width="850" />
492
+ </div>
493
+
494
+ ````python
495
+ import json
496
+ from lagent.actions import IPythonInterpreter, WebBrowser, ActionExecutor
497
+ from lagent.agents.stream import get_plugin_prompt
498
+ from lagent.llms import GPTAPI
499
+ from lagent.hooks import InternLMActionProcessor
500
+
501
+ TOOL_TEMPLATE = (
502
+ "You are a helpful AI assistant, collaborating with other assistants. Use the provided tools to progress"
503
+ " towards answering the question. If you are unable to fully answer, that's OK, another assistant with"
504
+ " different tools will help where you left off. Execute what you can to make progress. If you or any of"
505
+ " the other assistants have the final answer or deliverable, prefix your response with {finish_pattern}"
506
+ " so the team knows to stop. You have access to the following tools:\n{tool_description}\nPlease provide"
507
+ " your thought process when you need to use a tool, followed by the call statement in this format:"
508
+ "\n{invocation_format}\\\\n**{system_prompt}**"
509
+ )
510
+
511
+ class DataVisualizer(Agent):
512
+ def __init__(self, model_path, research_prompt, chart_prompt, finish_pattern="Final Answer", max_turn=10):
513
+ super().__init__()
514
+ llm = GPTAPI(model_path, key='YOUR_OPENAI_API_KEY', retry=5, max_new_tokens=1024, stop_words=["```\n"])
515
+ interpreter, browser = IPythonInterpreter(), WebBrowser("BingSearch", api_key="YOUR_BING_API_KEY")
516
+ self.researcher = Agent(
517
+ llm,
518
+ TOOL_TEMPLATE.format(
519
+ finish_pattern=finish_pattern,
520
+ tool_description=get_plugin_prompt(browser),
521
+ invocation_format='```json\n{"name": {{tool name}}, "parameters": {{keyword arguments}}}\n```\n',
522
+ system_prompt=research_prompt,
523
+ ),
524
+ output_format=ToolParser(
525
+ "browser",
526
+ begin="```json\n",
527
+ end="\n```\n",
528
+ validate=lambda x: json.loads(x.rstrip('`')),
529
+ ),
530
+ aggregator=InternLMToolAggregator(),
531
+ name="researcher",
532
+ )
533
+ self.charter = Agent(
534
+ llm,
535
+ TOOL_TEMPLATE.format(
536
+ finish_pattern=finish_pattern,
537
+ tool_description=interpreter.name,
538
+ invocation_format='```python\n{{code}}\n```\n',
539
+ system_prompt=chart_prompt,
540
+ ),
541
+ output_format=ToolParser(
542
+ "interpreter",
543
+ begin="```python\n",
544
+ end="\n```\n",
545
+ validate=lambda x: x.rstrip('`'),
546
+ ),
547
+ aggregator=InternLMToolAggregator(),
548
+ name="charter",
549
+ )
550
+ self.executor = ActionExecutor([interpreter, browser], hooks=[InternLMActionProcessor()])
551
+ self.finish_pattern = finish_pattern
552
+ self.max_turn = max_turn
553
+
554
+ def forward(self, message, session_id=0):
555
+ for _ in range(self.max_turn):
556
+ message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"]) # override llm stop words
557
+ while message.formatted["tool_type"]:
558
+ message = self.executor(message, session_id=session_id)
559
+ message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"])
560
+ if self.finish_pattern in message.content:
561
+ return message
562
+ message = self.charter(message)
563
+ while message.formatted["tool_type"]:
564
+ message = self.executor(message, session_id=session_id)
565
+ message = self.charter(message, session_id=session_id)
566
+ if self.finish_pattern in message.content:
567
+ return message
568
+ return message
569
+
570
+ visualizer = DataVisualizer(
571
+ "gpt-4o-2024-05-13",
572
+ research_prompt="You should provide accurate data for the chart generator to use.",
573
+ chart_prompt="Any charts you display will be visible by the user.",
574
+ )
575
+ user_msg = AgentMessage(
576
+ sender='user',
577
+ content="Fetch the China's GDP over the past 5 years, then draw a line graph of it. Once you code it up, finish.")
578
+ bot_msg = visualizer(user_msg)
579
+ print(bot_msg.content)
580
+ json.dump(visualizer.state_dict(), open('visualizer.json', 'w'), ensure_ascii=False, indent=4)
581
+ ````
582
+
583
+ ## Citation
584
+
585
+ If you find this project useful in your research, please consider citing:
586
+
587
+ ```latex
588
+ @misc{lagent2023,
589
+ title={{Lagent: InternLM} a lightweight open-source framework that allows users to efficiently build large language model(LLM)-based agents},
590
+ author={Lagent Developer Team},
591
+ howpublished = {\url{https://github.com/InternLM/lagent}},
592
+ year={2023}
593
+ }
594
+ ```
595
+
596
+ ## License
597
+
598
+ This project is released under the [Apache 2.0 license](LICENSE).
599
+
600
+ <p align="right"><a href="#top">🔼 Back to top</a></p>
lagent.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LICENSE
2
+ MANIFEST.in
3
+ README.md
4
+ setup.cfg
5
+ setup.py
6
+ lagent/__init__.py
7
+ lagent/schema.py
8
+ lagent/version.py
9
+ lagent.egg-info/PKG-INFO
10
+ lagent.egg-info/SOURCES.txt
11
+ lagent.egg-info/dependency_links.txt
12
+ lagent.egg-info/requires.txt
13
+ lagent.egg-info/top_level.txt
14
+ lagent/actions/__init__.py
15
+ lagent/actions/action_executor.py
16
+ lagent/actions/arxiv_search.py
17
+ lagent/actions/base_action.py
18
+ lagent/actions/bing_map.py
19
+ lagent/actions/builtin_actions.py
20
+ lagent/actions/google_scholar_search.py
21
+ lagent/actions/google_search.py
22
+ lagent/actions/ipython_interactive.py
23
+ lagent/actions/ipython_interpreter.py
24
+ lagent/actions/ipython_manager.py
25
+ lagent/actions/parser.py
26
+ lagent/actions/ppt.py
27
+ lagent/actions/python_interpreter.py
28
+ lagent/actions/web_browser.py
29
+ lagent/agents/__init__.py
30
+ lagent/agents/agent.py
31
+ lagent/agents/react.py
32
+ lagent/agents/stream.py
33
+ lagent/agents/aggregator/__init__.py
34
+ lagent/agents/aggregator/default_aggregator.py
35
+ lagent/agents/aggregator/tool_aggregator.py
36
+ lagent/distributed/__init__.py
37
+ lagent/distributed/http_serve/__init__.py
38
+ lagent/distributed/http_serve/api_server.py
39
+ lagent/distributed/http_serve/app.py
40
+ lagent/distributed/ray_serve/__init__.py
41
+ lagent/distributed/ray_serve/ray_warpper.py
42
+ lagent/hooks/__init__.py
43
+ lagent/hooks/action_preprocessor.py
44
+ lagent/hooks/hook.py
45
+ lagent/hooks/logger.py
46
+ lagent/llms/__init__.py
47
+ lagent/llms/base_api.py
48
+ lagent/llms/base_llm.py
49
+ lagent/llms/huggingface.py
50
+ lagent/llms/lmdeploy_wrapper.py
51
+ lagent/llms/meta_template.py
52
+ lagent/llms/openai.py
53
+ lagent/llms/sensenova.py
54
+ lagent/llms/vllm_wrapper.py
55
+ lagent/memory/__init__.py
56
+ lagent/memory/base_memory.py
57
+ lagent/memory/manager.py
58
+ lagent/prompts/__init__.py
59
+ lagent/prompts/prompt_template.py
60
+ lagent/prompts/parsers/__init__.py
61
+ lagent/prompts/parsers/custom_parser.py
62
+ lagent/prompts/parsers/json_parser.py
63
+ lagent/prompts/parsers/str_parser.py
64
+ lagent/prompts/parsers/tool_parser.py
65
+ lagent/utils/__init__.py
66
+ lagent/utils/gen_key.py
67
+ lagent/utils/package.py
68
+ lagent/utils/util.py
69
+ requirements/docs.txt
70
+ requirements/optional.txt
71
+ requirements/runtime.txt
lagent.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+