Compare commits

...

183 Commits

Author SHA1 Message Date
ccurme
6b24eeb884 fireworks[patch]: fix warnings (#26385) 2024-09-12 14:03:24 +00:00
ccurme
d82eec6aad standard-tests[patch]: add standard test for structured output with optional param (#26384) 2024-09-12 09:39:51 -04:00
Bagatur
da037f1c55 docs: fix anthropic docstrings (#26367) 2024-09-12 02:01:07 +00:00
Bagatur
21a43ee3dc docs: update query analysis tutorial (#26363) 2024-09-11 17:13:12 -07:00
Bagatur
28594567de docs: fix structured output doc (#26360) 2024-09-11 16:56:15 -07:00
Bagatur
cacd68b2a7 docs: fix migrate agent (#26353) 2024-09-11 16:08:55 -07:00
Bagatur
1ee432e904 fix api ref reqs (#26357) 2024-09-11 15:19:59 -07:00
ccurme
0bfbd4f7bd community[patch]: release 0.3.0.dev2 (#26356) 2024-09-11 17:00:21 -04:00
ccurme
7ededced7d langchain[patch]: release 0.3.0.dev2 (#26355) 2024-09-11 16:45:55 -04:00
ccurme
284e1a7e9e core[patch]: support pydantic v1 annotations in tool arguments (#26336)
If all pydantic annotations in function signature are V1, use V1
`validate_arguments`.
2024-09-11 16:31:40 -04:00
Bagatur
d67e1dfe32 docs: fix merge messages how-to (#26351) 2024-09-11 13:29:35 -07:00
ccurme
df4fc60312 core[patch]: release 0.3.0.dev5 (#26352) 2024-09-11 16:29:25 -04:00
ccurme
c074922876 core[patch]: fix regression in convert_to_openai_tool with instances of Tool (v0.3rc) (#26349)
Cherry-pick https://github.com/langchain-ai/langchain/pull/26327 from
master.
2024-09-11 16:00:16 -04:00
Eugene Yurtsev
bbb7c267e5 docs[patch]: Improve env variable handling in documentation notebooks (#26347)
Updated using gritql

```
`os.environ[$key] = getpass($msg)` as $M where {
    $M <: ! within if_statement(),
    $M => `if $key not in os.environ: $M`
}
```
2024-09-11 19:39:39 +00:00
Bagatur
20f880ac84 infra: update api ref reqs (#26341) 2024-09-11 11:54:19 -07:00
Erick Friis
9f0968f9d2 docs: v0.3rc docs (#26277)
wip / don't merge
2024-09-11 11:12:39 -07:00
Eugene Yurtsev
8b7f5e1ec0 core[patch]: Remove non relevant todos (#26332)
Clean up TODO(0.3)
2024-09-11 13:38:39 -04:00
Eugene Yurtsev
b9575d586b docs: Handle existing env variables better (#26331)
Update tutorials, how-to and remaining integrations.

Using gritql

```
grit apply --language python '
`os.environ[$key] = getpass.getpass("$msg")` as $M where {
    $M <: ! within if_statement(),
    $M => `if $key not in os.environ: $M`
}' .
```
2024-09-11 13:11:57 -04:00
Eugene Yurtsev
934bc5b030 docs: update integration notebooks handling of env variables (#26330)
Using gritql

```
grit apply --language python '
`os.environ[$key] = getpass.getpass("$msg")` as $M where {
    $M <: ! within if_statement(),
    $M => `if $key not in os.environ: $M`
}' .
```
2024-09-11 13:10:27 -04:00
Eugene Yurtsev
da48c98eaf core[patch]: Add type literal to LLMResult (#26282)
Needed for LangServe serialization/deserialization of callback events.
2024-09-11 11:46:13 -04:00
Eugene Yurtsev
844955d6e1 community[patch]: assign missed default (#26326)
Assigning missed defaults in various classes. Most clients were being
assigned during the `model_validator(mode="before")` step, so this
change should amount to a no-op in those cases.

---

This PR was autogenerated using gritql

```shell

grit apply 'class_definition(name=$C, $body, superclasses=$S) where {    
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node        
        $t => `$t = None`,
    },    
}
' --language python .

```
2024-09-11 11:13:11 -04:00
Eugene Yurtsev
c417bbc313 core[patch]: Add default None to StructuredTool func (#26324)
This PR was autogenerated using gritql, tests written manually

```shell

grit apply 'class_definition(name=$C, $body, superclasses=$S) where {    
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node        
        $t => `$t = None`,
    },    
}
' --language python .

```
2024-09-11 11:12:52 -04:00
Eugene Yurtsev
edcd348ce7 huggingface,box[patch]: Add missing default None (#26323)
This PR was autogenerated using gritql

```
engine marzano(0.1)
language python

class_definition(name=$C, $body, superclasses=$S) where {    
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node        
        $t => `$t = None`,
    },    
}
```


```shell

grit apply 'class_definition(name=$C, $body, superclasses=$S) where {    
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node        
        $t => `$t = None`,
    },    
}
' --language python .

```
2024-09-11 11:07:31 -04:00
Eugene Yurtsev
d3499cc90b langchain[patch]: Assign appropriate default for Optional/Any types (#26325)
This PR was autogenerated using gritql

```
engine marzano(0.1)
language python

class_definition(name=$C, $body, superclasses=$S) where {    
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node        
        $t => `$t = None`,
    },    
}
```


```shell

grit apply 'class_definition(name=$C, $body, superclasses=$S) where {    
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node        
        $t => `$t = None`,
    },    
}
' --language python .

```
2024-09-11 11:06:30 -04:00
Bagatur
2b15183980 docs: run tool, query analysis how tos (#26306) 2024-09-11 03:31:25 +00:00
Erick Friis
157d32b294 prompty: fix core dep for min testing (#26300) 2024-09-11 02:28:59 +00:00
Erick Friis
ec49d0d0b4 infra: codespell on all pr branches (#26299) 2024-09-10 19:21:53 -07:00
Erick Friis
19bce9aba1 community: model config cassandradatabasetool (#26296) 2024-09-10 19:17:22 -07:00
Bagatur
a56812f970 docs: replace gpt-3.5-turbo-0125 and -1106 with gpt-4o-mini (#26292) 2024-09-10 16:23:05 -07:00
ccurme
89c6cb6f8b mongo[patch]: release 0.2.0.dev1 (#26289) 2024-09-10 19:03:45 -04:00
Bagatur
aa9f247803 core[patch]: manually coerce ToolMessage args (#26283) 2024-09-10 15:57:57 -07:00
Bagatur
fce9322d2e core[patch]: use pydantic.v1 in old tracer code (#26290)
Thank you for contributing to LangChain!

- [ ] **PR title**: "package: description"
- Where "package" is whichever of langchain, community, core,
experimental, etc. is being modified. Use "docs: ..." for purely docs
changes, "templates: ..." for template changes, "infra: ..." for CI
changes.
  - Example: "community: add foobar LLM"


- [ ] **PR message**: ***Delete this entire checklist*** and replace
with
    - **Description:** a description of the change
    - **Issue:** the issue # it fixes, if applicable
    - **Dependencies:** any dependencies required for this change
- **Twitter handle:** if your PR gets announced, and you'd like a
mention, we'll gladly shout you out!


- [ ] **Add tests and docs**: If you're adding a new integration, please
include
1. a test for the integration, preferably unit tests that do not rely on
network access,
2. an example notebook showing its use. It lives in
`docs/docs/integrations` directory.


- [ ] **Lint and test**: Run `make format`, `make lint` and `make test`
from the root of the package(s) you've modified. See contribution
guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional
ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in
langchain.

If no one reviews your PR within a few days, please @-mention one of
baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17.
2024-09-10 15:48:02 -07:00
ccurme
e2c8690e7f community[patch]: fix huggingface pydantic 2 init (#26286) 2024-09-10 18:15:39 -04:00
Eugene Yurtsev
287911adbc community[patch]: Add protected_namespace=() to models that use model namespace (#26284)
Add protected_namespaces=() for existing implementations that use the
pydantic
reserved model namespace.
2024-09-10 17:32:10 -04:00
ccurme
fc08f240ee huggingface[patch]: bump deps and increment version to 0.1.0.dev1 (#26263) 2024-09-10 16:49:12 -04:00
Eugene Yurtsev
bee8994b7e langchain,core: Deprecate pydantic.v1 shims (#26280)
Deprecate shim namespace
2024-09-10 16:19:20 -04:00
Bagatur
c62772885a docs: migrate pydantic imports (#26266) 2024-09-10 13:06:58 -07:00
Eugene Yurtsev
374bb40852 community: Force opt-in for qa chains (#26278)
The underlying code is already documented as requiring appropriate RBAC
control, but adding a forced user opt-in to make sure that users
that don't read documentation are still aware of what's required
from a security perspective.

https://huntr.com/bounties/8f4ad910-7fdc-4089-8f0a-b5df5f32e7c5
2024-09-10 15:59:03 -04:00
Bagatur
c43c62b6c9 merge master (#26275) 2024-09-10 12:57:18 -07:00
Eugene Yurtsev
bb83f1b875 community[patch]: Resolve parse_obj (#26262)
Update parse_obj
2024-09-10 15:56:34 -04:00
Eugene Yurtsev
ed412d89b4 core[patch]: Do not hard coded name for RunnableBranch (#26276)
This is no longer necessary
2024-09-10 15:49:51 -04:00
Bagatur
789f4b1c9c fmt 2024-09-10 12:48:06 -07:00
Bagatur
13ba15b2cc Merge branch 'v0.3rc' into bagatur/09-10/v0.3_merge_master 2024-09-10 12:44:49 -07:00
Bagatur
b904763115 fmt 2024-09-10 12:44:11 -07:00
Bagatur
cecdc119bf merge 2024-09-10 12:32:47 -07:00
Eugene Yurtsev
7975c1f0ca core[patch]: Propagate module name to create model (#26267)
* This allows pydantic to correctly resolve annotations necessary for
building pydantic models dynamically.
* Makes a small fix for RunnableWithMessageHistory which was fetching
the OutputType from the RunnableLambda that was yielding another
RunnableLambda. This doesn't propagate the output of the RunnableAssign
fully (i.e., with concrete type information etc.)

Resolves issue: https://github.com/langchain-ai/langchain/issues/26250
2024-09-10 15:22:56 -04:00
ccurme
622cb7d2cf huggingface[patch]: update integration tests on v0.3rc branch (#26274)
cherry-pick these from master branch
2024-09-10 15:06:48 -04:00
ccurme
3a0c7c705c ollama[patch]: bump core dep and increment version to 0.2.0.dev1 (#26271)
Confirmed no pydantic warnings in unit or integration tests.
2024-09-10 14:41:53 -04:00
Bagatur
162d3ff54b docs: fix language_models docstring (#26268) 2024-09-10 10:41:28 -07:00
Bagatur
301be2d40a core[patch]: de-beta astream_events (#26248) 2024-09-10 10:09:29 -07:00
ccurme
6df9178056 groq[patch]: bump deps (#26261)
To confirm no pydantic warnings.
2024-09-10 10:42:56 -04:00
ccurme
6208773c77 community[patch]: set protected namespaces on embeddings (#26156)
Also fix serdes test for langchain-google-genai.
2024-09-10 09:28:41 -04:00
Bagatur
e24259fee7 docs: update pydantic api ref templates (#26246) 2024-09-09 18:14:12 -07:00
Bagatur
9132516c84 core[patch]: add back ChatModel.callback_manager deprecation (#26244) 2024-09-09 17:46:48 -07:00
Bagatur
f2f9187919 community[patch]: fix community warnings 1 (#26239) 2024-09-09 17:27:00 -07:00
Bagatur
438301db90 community[patch]: update docstrings (#26243) 2024-09-09 17:16:56 -07:00
Bagatur
7842e2c460 langchain[patch]: docstring update (#26242) 2024-09-09 17:12:32 -07:00
Bagatur
949127fbc1 infra: rm check pydantic scripts (#26240) 2024-09-09 17:12:16 -07:00
Bagatur
baad44965e core[patch]: update docstrings (#26241) 2024-09-09 17:04:57 -07:00
Bagatur
d588ce1f29 community[patch]: fix extended deps (#26238) 2024-09-09 16:10:10 -07:00
ccurme
2070d659a0 mistralai: release 0.2.0.dev1 (#26236) 2024-09-09 16:09:23 -04:00
ccurme
6c8d626d70 experimental[major]: upgrade pydantic (#26228) 2024-09-09 14:27:24 -04:00
Bagatur
109ba548bd langchain[patch]: fix pydantic 2 warnings (#26188) 2024-09-09 11:19:02 -07:00
Bagatur
0f4a087186 community[patch]: fix main unit tests (#26189) 2024-09-09 10:35:41 -07:00
Bagatur
71268f7a15 standard-tests[patch]: resolve pydantic warnings (#26190) 2024-09-09 10:34:57 -07:00
Eugene Yurtsev
b8fc82b84b core[patch]: Fix _get_type in AnyMessage (#26223)
Fix _get_type to work on deserialization path as well and add a unit test.
2024-09-09 10:33:18 -04:00
Bagatur
179eeead81 anthropic[patch]: pydantic ^2.7.4 (#26191) 2024-09-08 11:46:19 -07:00
Bagatur
7a57b4fbbf core[patch]: Release 0.3.0dev4 (#26178) 2024-09-06 18:49:41 -04:00
Bagatur
d9ba65ca26 core[patch]: pydantic 2.7-compatible AnyMessage (#26177) 2024-09-06 18:44:06 -04:00
Eugene Yurtsev
0319ccd273 core[patch]: only support pydantic >= 2.9 for now (#26176)
For now we'll only support pydantic ^ 2.9. We'll relax the constraint
next week once we work around some issues with pydantic 2.7 / 2.8.
2024-09-06 18:13:17 -04:00
Eugene Yurtsev
6e2a72c218 core[patch]: Add missing cache for create_model (#26173)
It makes a big difference for performance.
2024-09-06 17:59:18 -04:00
Bagatur
9f482f4284 cherry langsmith cache fix (#26169) 2024-09-06 17:47:47 -04:00
Erick Friis
15466d89a2 infra: core remove 3.8 (#26172) 2024-09-06 14:47:16 -07:00
Eugene Yurtsev
61087b0c0d core[patch]: Fix changes to pydantic schema due to pydantic 2.8.2 -> 2.9 changes (#26166)
Minor non functional change in pydantic schema generation
2024-09-06 17:24:10 -04:00
Bagatur
b2ba4f4072 core[patch]: fix deprecated pydantic code (#26161) 2024-09-06 17:14:17 -04:00
Bagatur
b2c8f2de4c core[patch]: fix ChatPromptValueConcrete typing (#26106)
Thank you for contributing to LangChain!

- [ ] **PR title**: "package: description"
- Where "package" is whichever of langchain, community, core,
experimental, etc. is being modified. Use "docs: ..." for purely docs
changes, "templates: ..." for template changes, "infra: ..." for CI
changes.
  - Example: "community: add foobar LLM"


- [ ] **PR message**: ***Delete this entire checklist*** and replace
with
    - **Description:** a description of the change
    - **Issue:** the issue # it fixes, if applicable
    - **Dependencies:** any dependencies required for this change
- **Twitter handle:** if your PR gets announced, and you'd like a
mention, we'll gladly shout you out!


- [ ] **Add tests and docs**: If you're adding a new integration, please
include
1. a test for the integration, preferably unit tests that do not rely on
network access,
2. an example notebook showing its use. It lives in
`docs/docs/integrations` directory.


- [ ] **Lint and test**: Run `make format`, `make lint` and `make test`
from the root of the package(s) you've modified. See contribution
guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional
ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in
langchain.

If no one reviews your PR within a few days, please @-mention one of
baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17.
2024-09-06 17:13:57 -04:00
Bagatur
6df9360e32 core[patch]: remove v1_repr (#26165)
Co-authored-by: Erick Friis <erick@langchain.dev>
2024-09-06 17:00:52 -04:00
Erick Friis
b664b3364c multiple: merge master into v0.3rc branch (#26163)
Signed-off-by: ChengZi <chen.zhang@zilliz.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
Co-authored-by: Tomaz Bratanic <bratanic.tomaz@gmail.com>
Co-authored-by: ZhangShenao <15201440436@163.com>
Co-authored-by: Friso H. Kingma <fhkingma@gmail.com>
Co-authored-by: Chester Curme <chester.curme@gmail.com>
Co-authored-by: ChengZi <chen.zhang@zilliz.com>
2024-09-06 13:42:29 -07:00
Bagatur
bccc546a25 v0.3 dev releases (#26096)
branch for cutting dev releases

---------

Co-authored-by: Chester Curme <chester.curme@gmail.com>
Co-authored-by: Erick Friis <erick@langchain.dev>
2024-09-06 13:35:19 -07:00
Erick Friis
6405e7fa07 infra: ignore docs build in v0.3rc branch (#25990) 2024-09-06 13:24:13 -07:00
Erick Friis
ae24f7364d multiple: version bumps (#26159) 2024-09-06 12:37:17 -07:00
Erick Friis
81f8c2f33d infra: add pydantic to min version testing (#26152) 2024-09-06 12:09:56 -07:00
ccurme
c27703a10f core[patch]: resolve warnings (#26157)
Resolve a batch of warnings
2024-09-06 15:00:53 -04:00
ccurme
1b77063c88 openai[patch]: set protected namespaces on embeddings (#26155) 2024-09-06 13:00:41 -04:00
ccurme
b74546a458 core[patch]: add google genai to serialization (#26154) 2024-09-06 12:54:16 -04:00
Bagatur
8a3a9c8968 core[patch]: concrete prompt value test (#26128) 2024-09-05 20:49:05 -04:00
Erick Friis
776d01db49 infra: remove fail fast in v0.3rc branch (#26127) 2024-09-05 17:32:17 -07:00
Eugene Yurtsev
40b43b0bfb core[patch]: Remove some usage of .copy() in favor of .model_copy() (#26126)
Address one place where deprecated functionality is used.
2024-09-05 18:34:43 -04:00
Eugene Yurtsev
6fd4ac4283 core[patch]: Replace @validator with @model_validator in length based example selector (#26124)
Resolves another warning from usage of deprecated functionality in
pydantic 2
2024-09-05 18:26:43 -04:00
Eugene Yurtsev
f4e7cb394f core[patch]: Ignore pydantic deprecation warnings in validate_arguments (#26122)
For now, we'll use the deprecation functionality which is present until
pydantic 3.
2024-09-05 18:23:48 -04:00
Eugene Yurtsev
1ecaffab8a core[patch]: Fix regression in core (#26121)
Limited to unit testing code -- did not cause any actual issues
2024-09-05 17:41:36 -04:00
ccurme
5bbd5364f1 core[patch]: call RunnableConfigurableFields.model_rebuild() (#26118)
To fix a test in `langchain`
2024-09-05 16:59:52 -04:00
Eugene Yurtsev
e02b093d81 community[patch]: Fix more issues (#26116)
This PR resolves more type checking issues and fixes some bugs.
2024-09-05 16:31:21 -04:00
Eugene Yurtsev
0cc6584889 community[patch]: Resolve more linting issues (#26115)
Resolve a bunch of errors caught with mypy
2024-09-05 15:59:30 -04:00
Eugene Yurtsev
6e1b0d0228 community[patch]: Skip unit test that depends on langchain-aws and fix pydantic settings (#26111)
* Skip unit test that depends on langchain-aws
* fix pydantic settings
2024-09-05 15:08:34 -04:00
Eugene Yurtsev
a111098230 community[patch]: Remove usage of deprecated pydantic config option (#26107)
Remove usage of deprecated pydantic config option
2024-09-05 15:05:00 -04:00
ccurme
9e7222618b core: reduce warnings (#26108) 2024-09-05 15:04:41 -04:00
Harrison Chase
8516a03a02 langchain-community[major]: Upgrade community to pydantic 2 (#26011)
This PR upgrades langchain-community to pydantic 2.


* Most of this PR was auto-generated using code mods with gritql
(https://github.com/eyurtsev/migrate-pydantic/tree/main)
* Subsequently, some code was fixed manually due to accommodate
differences between pydantic 1 and 2

Breaking Changes:

- Use TEXTEMBED_API_KEY and TEXTEMBEB_API_URL for env variables for text
embed integrations:
cbea780492

Other changes:

- Added pydantic_settings as a required dependency for community. This
may be removed if we have enough time to convert the dependency into an
optional one.

---------

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
2024-09-05 14:07:10 -04:00
ccurme
1ad66e70dc text-splitters[major]: update core dep + drop support for python 3.8 (#26102) 2024-09-05 13:41:28 -04:00
Bagatur
76564edd3a openai[patch]: update configurable model dumps (#26101) 2024-09-05 13:26:40 -04:00
Eugene Yurtsev
1c51e1693d core[patch]: Fix issue with adapter utility for pydantic repr (#26099)
This repr will be deleted prior to release -- it's temporarily here to
make it easy to separate code changes in langchain vs. code changes
stemming from breaking changes in pydantic
2024-09-05 12:27:01 -04:00
Eugene Yurtsev
a267da6a3a core[minor]: Add type overload for secret_from_env factory (#26091)
Add type overload
2024-09-05 11:52:19 -04:00
Bagatur
8da2ace99d openai[patch]: update snapshots (#26098) 2024-09-05 11:41:14 -04:00
ccurme
e358846b39 core[patch]: add bedrock to load.mapping (#26094) 2024-09-05 10:56:46 -04:00
Eugene Yurtsev
3c598d25a6 core[minor]: Add get_input_jsonschema, get_output_jsonschema, get_config_jsonschema (#26034)
This PR adds methods to directly get the json schema for inputs,
outputs, and config.
Currently, it's delegating to the underlying pydantic implementation,
but this may be changed in the future to be independent of pydantic.
2024-09-05 10:36:42 -04:00
ccurme
e5aa0f938b mongo[major]: upgrade pydantic (#26053) 2024-09-05 09:05:41 -04:00
Bagatur
79c46319dd couchbase[patch]: rm pydantic usage (#26068) 2024-09-04 16:29:14 -07:00
ccurme
c5d4dfefc0 prompty[major]: upgrade pydantic (#26056) 2024-09-04 19:26:18 -04:00
ccurme
6e853501ec voyageai[major]: upgrade pydantic (#26070) 2024-09-04 18:59:13 -04:00
Bagatur
fd1f3ca213 exa[major]: use pydantic v2 (#26069) 2024-09-04 15:02:05 -07:00
Bagatur
567a4ce5aa box[major]: use pydantic v2 (#26067) 2024-09-04 14:51:53 -07:00
ccurme
923ce84aa7 robocorp[major]: upgrade pydantic (#26062) 2024-09-04 17:10:15 -04:00
Eugene Yurtsev
9379613132 langchain[major]: Upgrade langchain to be pydantic 2 compatible (#26050)
Upgrading the langchain package to be pydantic 2 compatible.

Had to remove some parts of unit tests in parsers that were relying on
spying on methods since that fails with pydantic 2. The unit tests don't
seem particularly good, so can be re-written at a future date.

Depends on: https://github.com/langchain-ai/langchain/pull/26057

Most of this PR was done using gritql for code mods, followed by some
fixes done manually to account for changes made by pydantic
2024-09-04 16:59:07 -04:00
Bagatur
c72a76237f cherry-pick 88e9e6b (#26063) 2024-09-04 13:50:42 -07:00
Bagatur
f9cafcbcb0 pinecone[patch]: rm pydantic lint script (#26052) 2024-09-04 13:49:09 -07:00
Bagatur
1fce5543bc poetry lock 2024-09-04 13:44:51 -07:00
Bagatur
88e9e6bf55 core,standard-tests[patch]: add Ser/Des test and update serialization mapping (#26042) 2024-09-04 13:38:03 -07:00
Bagatur
7f0dd4b182 fmt 2024-09-04 13:31:29 -07:00
Bagatur
5557b86a54 fmt 2024-09-04 13:31:29 -07:00
Bagatur
caf4ae3a45 fmt 2024-09-04 13:31:28 -07:00
Bagatur
c88b75ca6a fmt 2024-09-04 13:30:02 -07:00
Bagatur
e409a85a28 fmt 2024-09-04 13:29:24 -07:00
Bagatur
40634d441a make 2024-09-04 13:29:24 -07:00
Bagatur
1d2a503ab8 standard-tests[patch]: add Ser/Des test 2024-09-04 13:29:20 -07:00
ccurme
b924c61440 qdrant[major]: drop support for python 3.8 (#26061) 2024-09-04 16:22:54 -04:00
Eugene Yurtsev
efa10c8ef8 core[minor]: Add message chunks to AnyMessage (#26057)
Adds the chunk variant of each Message to AnyMessage.

Required for this PR:
https://github.com/langchain-ai/langchain/pull/26050/files
2024-09-04 15:36:22 -04:00
ccurme
0a6c67ce6a nomic: drop support for python 3.8 (#26055) 2024-09-04 15:30:00 -04:00
ccurme
ed771f2d2b huggingface[major]: upgrade pydantic (#26048) 2024-09-04 15:08:43 -04:00
ccurme
63ba12d8e0 milvus: drop support for python 3.8 (#26051)
to be consistent with core
2024-09-04 14:54:45 -04:00
Bagatur
f785cf029b pinecone[major]: Update to pydantic v2 (#26039) 2024-09-04 11:28:54 -07:00
ccurme
be7cd0756f ollama[major]: upgrade pydantic (#26044) 2024-09-04 13:54:52 -04:00
ccurme
51c6899850 groq[major]: upgrade pydantic (#26036) 2024-09-04 13:41:40 -04:00
ccurme
163d6fe8ef anthropic: update pydantic (#26000)
Migrated with gritql: https://github.com/eyurtsev/migrate-pydantic
2024-09-04 13:35:51 -04:00
ccurme
7cee7fbfad mistralai: update pydantic (#25995)
Migrated with gritql: https://github.com/eyurtsev/migrate-pydantic
2024-09-04 13:26:17 -04:00
ccurme
4799ad95d0 core[patch]: remove warnings from protected namespaces on RunnableSerializable (#26040) 2024-09-04 13:10:08 -04:00
Bagatur
88065d794b fmt 2024-09-04 09:52:01 -07:00
Bagatur
b27bfa6717 pinecone[major]: Update to pydantic v2 2024-09-04 09:50:39 -07:00
Bagatur
5adeaf0732 openai[major]: switch to pydantic v2 (#26001) 2024-09-04 09:18:29 -07:00
Bagatur
f9d91e19c5 fireworks[major]: switch to pydantic v2 (#26004) 2024-09-04 09:18:10 -07:00
Bagatur
4c7afb0d6c Update libs/partners/openai/langchain_openai/llms/base.py 2024-09-03 23:36:19 -07:00
Bagatur
c1ff61669d Update libs/partners/openai/langchain_openai/llms/base.py 2024-09-03 23:36:14 -07:00
Bagatur
54d6808c1e Update libs/partners/openai/langchain_openai/llms/azure.py 2024-09-03 23:36:08 -07:00
Bagatur
78468de2e5 Update libs/partners/openai/langchain_openai/llms/azure.py 2024-09-03 23:36:02 -07:00
Bagatur
76572f963b Update libs/partners/openai/langchain_openai/embeddings/base.py 2024-09-03 23:35:56 -07:00
Bagatur
c0448f27ba Update libs/partners/openai/langchain_openai/embeddings/base.py 2024-09-03 23:35:51 -07:00
Bagatur
179aaa4007 Update libs/partners/openai/langchain_openai/embeddings/azure.py 2024-09-03 23:35:43 -07:00
Bagatur
d072d592a1 Update libs/partners/openai/langchain_openai/embeddings/azure.py 2024-09-03 23:35:35 -07:00
Bagatur
78c454c130 Update libs/partners/openai/langchain_openai/chat_models/base.py 2024-09-03 23:35:30 -07:00
Bagatur
5199555c0d Update libs/partners/openai/langchain_openai/chat_models/base.py 2024-09-03 23:35:26 -07:00
Bagatur
5e31cd91a7 Update libs/partners/openai/langchain_openai/chat_models/azure.py 2024-09-03 23:35:21 -07:00
Bagatur
49a1f5dd47 Update libs/partners/openai/langchain_openai/chat_models/azure.py 2024-09-03 23:35:15 -07:00
Bagatur
d0cc9b022a Update libs/partners/fireworks/langchain_fireworks/chat_models.py 2024-09-03 23:30:56 -07:00
Bagatur
a91bd2737a Update libs/partners/fireworks/langchain_fireworks/chat_models.py 2024-09-03 23:30:49 -07:00
Bagatur
5ad2b8ce80 Merge branch 'v0.3rc' into bagatur/fireworks_0.3 2024-09-03 23:29:07 -07:00
Bagatur
b78764599b Merge branch 'v0.3rc' into bagatur/openai_attempt_2 2024-09-03 23:28:50 -07:00
Bagatur
2888e34f53 infra: remove pydantic v1 tests (#26006) 2024-09-03 23:27:52 -07:00
Bagatur
dd4418a503 rm requires 2024-09-03 23:26:13 -07:00
Bagatur
a976f2071b Merge branch 'v0.3rc' into bagatur/rm_pydantic_v1_ci 2024-09-03 19:06:22 -07:00
Eugene Yurtsev
5f98975be0 core[patch]: Fix injected args in tool signature (#25991)
- Fix injected args in tool signature
- Fix another unit test that was using the wrong namespace import in
pydantic
2024-09-03 21:53:50 -04:00
Bagatur
0529c991ce rm 2024-09-03 18:02:12 -07:00
Bagatur
954abcce59 infra: remove pydantic v1 tests 2024-09-03 18:01:34 -07:00
Bagatur
6ad515d34e Merge branch 'v0.3rc' into bagatur/fireworks_0.3 2024-09-03 17:51:46 -07:00
Bagatur
99348e1614 Merge branch 'v0.3rc' into bagatur/openai_attempt_2 2024-09-03 17:51:27 -07:00
Bagatur
2c742cc20d standard-tests[major]: use pydantic v2 (#26005) 2024-09-03 17:50:45 -07:00
Bagatur
02f87203f7 standard-tests[major]: use pydantic v2 2024-09-03 17:48:20 -07:00
Bagatur
56163481dd fmt 2024-09-03 17:46:41 -07:00
Bagatur
6aac2eeab5 fmt 2024-09-03 17:42:22 -07:00
Bagatur
559d8a4d13 fireworks[major]: switch to pydantic v2 2024-09-03 17:41:28 -07:00
Bagatur
ec9e8eb71c fmt 2024-09-03 17:24:24 -07:00
Bagatur
9399df7777 fmt 2024-09-03 16:57:42 -07:00
Bagatur
5fc1104d00 fmt 2024-09-03 16:51:14 -07:00
Bagatur
6777106fbe fmt 2024-09-03 16:50:17 -07:00
Bagatur
5f5287c3b0 fmt 2024-09-03 16:48:53 -07:00
Bagatur
615f8b0d47 openai[major]: switch to pydantic v2 2024-09-03 16:33:35 -07:00
Bagatur
9a9ab65030 merge master correctly (#25999) 2024-09-03 14:57:29 -07:00
Bagatur
241b6d2355 Revert "merge master (#25997)" (#25998) 2024-09-03 14:55:28 -07:00
Bagatur
91e09ffee5 merge master (#25997)
Co-authored-by: Dan O'Donovan <dan.odonovan@gmail.com>
Co-authored-by: Tom Daniel Grande <tomdgrande@gmail.com>
Co-authored-by: Grande <Tom.Daniel.Grande@statsbygg.no>
Co-authored-by: Erick Friis <erick@langchain.dev>
Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
2024-09-03 14:51:26 -07:00
Eugene Yurtsev
8e4bae351e core[major]: Drop python 3.8 support (#25996)
Drop python 3.8 support as EOL is 2024 October
2024-09-03 14:47:27 -07:00
Erick Friis
0da201c1d5 core: fix lint 0.3rc (#25993) 2024-09-03 17:13:52 -04:00
Erick Friis
29413a22e1 infra: also run lint/test on rc (#25992) 2024-09-03 14:02:49 -07:00
Eugene Yurtsev
ae5a574aa5 core[major]: Upgrade langchain-core to pydantic 2 (#25986)
This PR upgrades core to pydantic 2.

It involves a combination of manual changes together with automated code
mods using gritql.

Changes and known issues:

1. Current models override __repr__ to be consistent with pydantic 1
(this will be removed in a follow up PR)
Related:
https://github.com/langchain-ai/langchain/pull/25986/files#diff-e5bd296179b7a72fcd4ea5cfa28b145beaf787da057e6d122aa76ee0bb8132c9R74
2. Issue with decorator for BaseChatModel
(https://github.com/langchain-ai/langchain/pull/25986/files#diff-932bf3b314b268754ef640a5b8f52da96f9024fb81dd388dcd166b5713ecdf66R202)
-- cc @baskaryan
3. `name` attribute in Base Runnable does not have a default -- was
raising a pydantic warning due to override. We need to see if there's a
way to fix to avoid making a breaking change for folks with custom
runnables.
(https://github.com/langchain-ai/langchain/pull/25986/files#diff-836773d27f8565f4dd45e9d6cf828920f89991a880c098b7511e0d3bb78a8a0dR238)
4. Likely can remove hard-coded RunnableBranch name
(https://github.com/langchain-ai/langchain/pull/25986/files#diff-72894b94f70b1bfc908eb4d53f5ff90bb33bf8a4240a5e34cae48ddc62ac313aR147)
5. `model_*` namespace is reserved in pydantic. We'll need to specify
`protected_namespaces`
6. create_model does not have a cached path yet
7. get_input_schema() in many places has been updated to be explicit
about whether parameters are required or optional
8. injected tool args aren't picked up properly (losing type annotation)

For posterity the following gritql migrations were used:

```
engine marzano(0.1)
language python

or {
    `from $IMPORT import $...` where {
        $IMPORT <: contains `pydantic_v1`,
        $IMPORT => `pydantic`
    },
    `$X.update_forward_refs` => `$X.model_rebuild`,
  // This pattern still needs fixing as it fails (populate_by_name vs.
  // allow_populate_by_name)
  class_definition($name, $body) as $C where {
      $name <: `Config`,
      $body <: block($statements),
      $t = "",
      $statements <: some bubble($t) assignment(left=$x, right=$y) as $A where {    
        or {
            $x <: `allow_population_by_field_name` where {
                $t += `populate_by_name=$y,`
            },
            $t += `$x=$y,`
        }
      },
      $C => `model_config = ConfigDict($t)`,
      add_import(source="pydantic", name="ConfigDict")
  }
}

```



```
engine marzano(0.1)
language python

`@root_validator(pre=True)` as $decorator where {
    $decorator <: before function_definition($body, $return_type),
    $decorator => `@model_validator(mode="before")\n@classmethod`,
    add_import(source="pydantic", name="model_validator"),
    $return_type => `Any`
}
```

```
engine marzano(0.1)
language python

`@root_validator(pre=False, skip_on_failure=True)` as $decorator where {
    $decorator <: before function_definition($body, $parameters, $return_type) where {
        $body <: contains bubble or {
            `values["$Q"]` => `self.$Q`,
            `values.get("$Q")` => `(self.$Q or None)`,
            `values.get($Q, $...)` as $V where {
                $Q <: contains `"$QName"`,
                $V => `self.$QName`,
            },
            `return $Q` => `return self`
        }
    },
    $decorator => `@model_validator(mode="after")`,
    // Silly work around a bug in grit
    // Adding Self to pydantic and then will replace it with one from typing
    add_import(source="pydantic", name="model_validator"),
    $parameters => `self`,
    $return_type => `Self`
}

```

```
grit apply --language python '`Self` where { add_import(source="typing_extensions", name="Self")}'
```
2024-09-03 16:30:44 -04:00
Erick Friis
5a0e82c31c infra: fix 0.3rc ci check (#25988) 2024-09-03 12:20:08 -07:00
Erick Friis
8590b421c4 infra: ignore core dependents for 0.3rc (#25980) 2024-09-03 11:06:45 -07:00
1187 changed files with 32077 additions and 20247 deletions

View File

@@ -16,6 +16,10 @@ LANGCHAIN_DIRS = [
"libs/experimental",
]
# for 0.3rc, we are ignoring core dependents
# in order to be able to get CI to pass for individual PRs.
IGNORE_CORE_DEPENDENTS = True
# ignored partners are removed from dependents
# but still run if directly edited
IGNORED_PARTNERS = [
@@ -102,9 +106,9 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
if dir_ == "libs/core":
return [
{"working-directory": dir_, "python-version": f"3.{v}"}
for v in range(8, 13)
for v in range(9, 13)
]
min_python = "3.8"
min_python = "3.9"
max_python = "3.12"
# custom logic for specific directories
@@ -184,6 +188,9 @@ if __name__ == "__main__":
# for extended testing
found = False
for dir_ in LANGCHAIN_DIRS:
if dir_ == "libs/core" and IGNORE_CORE_DEPENDENTS:
dirs_to_run["extended-test"].add(dir_)
continue
if file.startswith(dir_):
found = True
if found:

View File

@@ -11,7 +11,7 @@ if __name__ == "__main__":
# see if we're releasing an rc
version = toml_data["tool"]["poetry"]["version"]
releasing_rc = "rc" in version
releasing_rc = "rc" in version or "dev" in version
# if not, iterate through dependencies and make sure none allow prereleases
if not releasing_rc:

View File

@@ -15,6 +15,7 @@ MIN_VERSION_LIBS = [
"langchain",
"langchain-text-splitters",
"SQLAlchemy",
"pydantic",
]
SKIP_IF_PULL_REQUEST = ["langchain-core"]

View File

@@ -1,114 +0,0 @@
name: dependencies
on:
workflow_call:
inputs:
working-directory:
required: true
type: string
description: "From which folder this pipeline executes"
langchain-location:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
jobs:
build:
defaults:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: dependency checks ${{ inputs.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: pydantic-cross-compat
- name: Install dependencies
shell: bash
run: poetry install
- name: Check imports with base dependencies
shell: bash
run: poetry run make check_imports
- name: Install test dependencies
shell: bash
run: poetry install --with test
- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}
if: ${{ inputs.langchain-location }}
env:
LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
run: |
poetry run pip install -e "$LANGCHAIN_LOCATION"
- name: Install the opposite major version of pydantic
# If normal tests use pydantic v1, here we'll use v2, and vice versa.
shell: bash
# airbyte currently doesn't support pydantic v2
if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
run: |
# Determine the major part of pydantic version
REGULAR_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
if [[ "$REGULAR_VERSION" == "1" ]]; then
PYDANTIC_DEP=">=2.1,<3"
TEST_WITH_VERSION="2"
elif [[ "$REGULAR_VERSION" == "2" ]]; then
PYDANTIC_DEP="<2"
TEST_WITH_VERSION="1"
else
echo "Unexpected pydantic major version '$REGULAR_VERSION', cannot determine which version to use for cross-compatibility test."
exit 1
fi
# Install via `pip` instead of `poetry add` to avoid changing lockfile,
# which would prevent caching from working: the cache would get saved
# to a different key than where it gets loaded from.
poetry run pip install "pydantic${PYDANTIC_DEP}"
# Ensure that the correct pydantic is installed now.
echo "Checking pydantic version... Expecting ${TEST_WITH_VERSION}"
# Determine the major part of pydantic version
CURRENT_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
# Check that the major part of pydantic version is as expected, if not
# raise an error
if [[ "$CURRENT_VERSION" != "$TEST_WITH_VERSION" ]]; then
echo "Error: expected pydantic version ${TEST_WITH_VERSION} to have been installed, but found: ${CURRENT_VERSION}"
exit 1
fi
echo "Found pydantic version ${CURRENT_VERSION}, as expected"
- name: Run pydantic compatibility tests
# airbyte currently doesn't support pydantic v2
if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
shell: bash
run: make test
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'

View File

@@ -46,6 +46,7 @@ jobs:
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.lint) }}
fail-fast: false
uses: ./.github/workflows/_lint.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
@@ -59,6 +60,7 @@ jobs:
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test) }}
fail-fast: false
uses: ./.github/workflows/_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
@@ -71,6 +73,7 @@ jobs:
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
fail-fast: false
uses: ./.github/workflows/_test_doc_imports.yml
secrets: inherit
with:
@@ -83,25 +86,13 @@ jobs:
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
fail-fast: false
uses: ./.github/workflows/_compile_integration_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
secrets: inherit
dependencies:
name: cd ${{ matrix.job-configs.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.dependencies != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
uses: ./.github/workflows/_dependencies.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
secrets: inherit
extended-tests:
name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
needs: [ build ]
@@ -110,6 +101,7 @@ jobs:
matrix:
# note different variable for extended test dirs
job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
fail-fast: false
runs-on: ubuntu-latest
defaults:
run:
@@ -149,7 +141,7 @@ jobs:
echo "$STATUS" | grep 'nothing to commit, working tree clean'
ci_success:
name: "CI Success"
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests, test-doc-imports]
needs: [build, lint, test, compile-integration-tests, extended-tests, test-doc-imports]
if: |
always()
runs-on: ubuntu-latest

View File

@@ -5,7 +5,6 @@ on:
push:
branches: [master, v0.1]
pull_request:
branches: [master, v0.1]
permissions:
contents: read

View File

@@ -17,7 +17,7 @@ jobs:
fail-fast: false
matrix:
python-version:
- "3.8"
- "3.9"
- "3.11"
working-directory:
- "libs/partners/openai"

View File

@@ -36,7 +36,6 @@ api_docs_build:
API_PKG ?= text-splitters
api_docs_quick_preview:
poetry run pip install "pydantic<2"
poetry run python docs/api_reference/create_api_rst.py $(API_PKG)
cd docs/api_reference && poetry run make html
poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/

View File

@@ -90,7 +90,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"# Please manually enter OpenAI Key"
]
},

View File

@@ -33,8 +33,8 @@ install-py-deps:
python3 -m venv .venv
$(PYTHON) -m pip install --upgrade pip
$(PYTHON) -m pip install --upgrade uv
$(PYTHON) -m uv pip install -r vercel_requirements.txt
$(PYTHON) -m uv pip install --editable $(PARTNER_DEPS_LIST)
$(PYTHON) -m uv pip install --pre -r vercel_requirements.txt
$(PYTHON) -m uv pip install --pre --editable $(PARTNER_DEPS_LIST)
generate-files:
mkdir -p $(INTERMEDIATE_DIR)

View File

@@ -1,5 +1,5 @@
autodoc_pydantic>=1,<2
sphinx<=7
autodoc_pydantic>=2,<3
sphinx>=8,<9
myst-parser>=3
sphinx-autobuild>=2024
pydata-sphinx-theme>=0.15
@@ -8,4 +8,4 @@ myst-nb>=1.1.1
pyyaml
sphinx-design
sphinx-copybutton
beautifulsoup4
beautifulsoup4

View File

@@ -17,7 +17,10 @@ def process_toc_h3_elements(html_content: str) -> str:
# Process each element
for element in toc_h3_elements:
element = element.a.code.span
try:
element = element.a.code.span
except Exception:
continue
# Get the text content of the element
content = element.get_text()

View File

@@ -15,7 +15,7 @@
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, model_extra, model_fields_set, model_json_schema
{% block attributes %}

View File

@@ -15,7 +15,7 @@
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign, as_tool
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign, as_tool, get_config_jsonschema, get_input_jsonschema, get_output_jsonschema, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, to_json, model_extra, model_fields_set, model_json_schema
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

View File

@@ -945,7 +945,7 @@ Here's an example:
```python
from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class Joke(BaseModel):
@@ -1062,7 +1062,7 @@ a `tool_calls` field containing `args` that match the desired shape.
There are several acceptable formats you can use to bind tools to a model in LangChain. Here's one example:
```python
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI
class ResponseFormatter(BaseModel):

View File

@@ -18,8 +18,23 @@
"cell_type": "code",
"execution_count": 1,
"id": "994d6c74",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:00.190093Z",
"iopub.status.busy": "2024-09-10T20:08:00.189665Z",
"iopub.status.idle": "2024-09-10T20:08:05.438015Z",
"shell.execute_reply": "2024-09-10T20:08:05.437685Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
]
}
],
"source": [
"# Build a sample vectorDB\n",
"from langchain_chroma import Chroma\n",
@@ -54,7 +69,14 @@
"cell_type": "code",
"execution_count": 2,
"id": "edbca101",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.439930Z",
"iopub.status.busy": "2024-09-10T20:08:05.439810Z",
"iopub.status.idle": "2024-09-10T20:08:05.553766Z",
"shell.execute_reply": "2024-09-10T20:08:05.553520Z"
}
},
"outputs": [],
"source": [
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
@@ -71,7 +93,14 @@
"cell_type": "code",
"execution_count": 3,
"id": "9e6d3b69",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.555359Z",
"iopub.status.busy": "2024-09-10T20:08:05.555262Z",
"iopub.status.idle": "2024-09-10T20:08:05.557046Z",
"shell.execute_reply": "2024-09-10T20:08:05.556825Z"
}
},
"outputs": [],
"source": [
"# Set logging for the queries\n",
@@ -85,13 +114,20 @@
"cell_type": "code",
"execution_count": 4,
"id": "bc93dc2b-9407-48b0-9f9a-338247e7eb69",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.558176Z",
"iopub.status.busy": "2024-09-10T20:08:05.558100Z",
"iopub.status.idle": "2024-09-10T20:08:07.250342Z",
"shell.execute_reply": "2024-09-10T20:08:07.249711Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. What strategies are commonly used for Task Decomposition?', '3. What are the various techniques for breaking down tasks in Task Decomposition?']\n"
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. What strategies are commonly used for Task Decomposition?', '3. What are the various ways to break down tasks in Task Decomposition?']\n"
]
},
{
@@ -137,14 +173,21 @@
"cell_type": "code",
"execution_count": 5,
"id": "d9afb0ca",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:07.253875Z",
"iopub.status.busy": "2024-09-10T20:08:07.253600Z",
"iopub.status.idle": "2024-09-10T20:08:07.277848Z",
"shell.execute_reply": "2024-09-10T20:08:07.277487Z"
}
},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain_core.output_parsers import BaseOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Output parser will split the LLM result into a list of queries\n",
@@ -180,13 +223,20 @@
"cell_type": "code",
"execution_count": 6,
"id": "59c75c56-dbd7-4887-b9ba-0b5b21069f51",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:07.280001Z",
"iopub.status.busy": "2024-09-10T20:08:07.279861Z",
"iopub.status.idle": "2024-09-10T20:08:09.579525Z",
"shell.execute_reply": "2024-09-10T20:08:09.578837Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression?', '4. In what way is regression covered in the course?', '5. What are the teachings of the course regarding regression?']\n"
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer regarding regression?', '4. In what way is regression covered in the course?', \"5. What are the course's teachings on regression?\"]\n"
]
},
{
@@ -228,7 +278,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -45,7 +45,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{

View File

@@ -49,7 +49,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@@ -183,7 +184,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_z0OU2CytqENVrRTI6T8DkI3u', 'function': {'arguments': '{\"location\": \"San Francisco, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw', 'function': {'arguments': '{\"location\": \"New York, NY\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH', 'function': {'arguments': '{\"location\": \"Los Angeles, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 84, 'prompt_tokens': 85, 'total_tokens': 169}, 'model_name': 'gpt-3.5-turbo-1106', 'system_fingerprint': 'fp_77a673219d', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d57ad5fa-b52a-4822-bc3e-74f838697e18-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco, CA', 'unit': 'celsius'}, 'id': 'call_z0OU2CytqENVrRTI6T8DkI3u'}, {'name': 'get_current_weather', 'args': {'location': 'New York, NY', 'unit': 'celsius'}, 'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw'}, {'name': 'get_current_weather', 'args': {'location': 'Los Angeles, CA', 'unit': 'celsius'}, 'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH'}])"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_z0OU2CytqENVrRTI6T8DkI3u', 'function': {'arguments': '{\"location\": \"San Francisco, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw', 'function': {'arguments': '{\"location\": \"New York, NY\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH', 'function': {'arguments': '{\"location\": \"Los Angeles, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 84, 'prompt_tokens': 85, 'total_tokens': 169}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': 'fp_77a673219d', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d57ad5fa-b52a-4822-bc3e-74f838697e18-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco, CA', 'unit': 'celsius'}, 'id': 'call_z0OU2CytqENVrRTI6T8DkI3u'}, {'name': 'get_current_weather', 'args': {'location': 'New York, NY', 'unit': 'celsius'}, 'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw'}, {'name': 'get_current_weather', 'args': {'location': 'Los Angeles, CA', 'unit': 'celsius'}, 'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH'}])"
]
},
"execution_count": 5,
@@ -192,7 +193,7 @@
}
],
"source": [
"model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\").bind(tools=tools)\n",
"model = ChatOpenAI(model=\"gpt-4o-mini\").bind(tools=tools)\n",
"model.invoke(\"What's the weather in SF, NYC and LA?\")"
]
},

View File

@@ -50,7 +50,8 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI()"
]

View File

@@ -26,10 +26,32 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "165b0de6-9ae3-4e3d-aa98-4fc8a97c4a06",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:32.858670Z",
"iopub.status.busy": "2024-09-10T20:22:32.858278Z",
"iopub.status.idle": "2024-09-10T20:22:33.009452Z",
"shell.execute_reply": "2024-09-10T20:22:33.007022Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zsh:1: 0.2.8 not found\r\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
]
@@ -44,19 +66,48 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "79e14913-803c-4382-9009-5c6af3d75d35",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:33.015729Z",
"iopub.status.busy": "2024-09-10T20:22:33.015241Z",
"iopub.status.idle": "2024-09-10T20:22:39.391716Z",
"shell.execute_reply": "2024-09-10T20:22:39.390438Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95293/571506279.py:4: LangChainBetaWarning: The function `init_chat_model` is in beta. It is actively being worked on, so the API may change.\n",
" gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?\n",
"\n",
"Claude Opus: My name is Claude. It's nice to meet you!\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Gemini 1.5: I am a large language model, trained by Google. \n",
"\n",
"Gemini 1.5: I am a large language model, trained by Google. I do not have a name. \n",
"I don't have a name like a person does. You can call me Bard if you like! 😊 \n",
"\n",
"\n"
]
@@ -94,9 +145,16 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:39.396908Z",
"iopub.status.busy": "2024-09-10T20:22:39.396563Z",
"iopub.status.idle": "2024-09-10T20:22:39.444959Z",
"shell.execute_reply": "2024-09-10T20:22:39.444646Z"
}
},
"outputs": [],
"source": [
"gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
@@ -116,17 +174,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:39.446901Z",
"iopub.status.busy": "2024-09-10T20:22:39.446773Z",
"iopub.status.idle": "2024-09-10T20:22:40.301906Z",
"shell.execute_reply": "2024-09-10T20:22:40.300918Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_d576307f90', 'finish_reason': 'stop', 'logprobs': None}, id='run-5428ab5c-b5c0-46de-9946-5d4ca40dbdc8-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-b41df187-4627-490d-af3c-1c96282d3eb0-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
]
},
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -141,17 +206,24 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:40.316030Z",
"iopub.status.busy": "2024-09-10T20:22:40.315628Z",
"iopub.status.idle": "2024-09-10T20:22:41.199134Z",
"shell.execute_reply": "2024-09-10T20:22:41.198173Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_012XvotUJ3kGLXJUWKBVxJUi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-1ad1eefe-f1c6-4244-8bc6-90e2cb7ee554-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01Fx9P74A7syoFkwE73CdMMY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-a0fd2bbd-3b7e-46bf-8d69-a48c7e60b03c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -174,17 +246,24 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 6,
"id": "814a2289-d0db-401e-b555-d5116112b413",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:41.203346Z",
"iopub.status.busy": "2024-09-10T20:22:41.203004Z",
"iopub.status.idle": "2024-09-10T20:22:41.891450Z",
"shell.execute_reply": "2024-09-10T20:22:41.890539Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_ce0793330f', 'finish_reason': 'stop', 'logprobs': None}, id='run-3923e328-7715-4cd6-b215-98e4b6bf7c9d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-3380f977-4b89-4f44-bc02-b64043b3166f-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
]
},
"execution_count": 9,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -202,17 +281,24 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 7,
"id": "6c8755ba-c001-4f5a-a497-be3f1db83244",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:41.896413Z",
"iopub.status.busy": "2024-09-10T20:22:41.895967Z",
"iopub.status.idle": "2024-09-10T20:22:42.767565Z",
"shell.execute_reply": "2024-09-10T20:22:42.766619Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_01RyYR64DoMPNCfHeNnroMXm', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-22446159-3723-43e6-88df-b84797e7751d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01EFKSWpmsn2PSYPQa4cNHWb', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-3c58f47c-41b9-4e56-92e7-fb9602e3787c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 10,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -242,28 +328,37 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "067dabee-1050-4110-ae24-c48eba01e13b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:42.771941Z",
"iopub.status.busy": "2024-09-10T20:22:42.771606Z",
"iopub.status.idle": "2024-09-10T20:22:43.909206Z",
"shell.execute_reply": "2024-09-10T20:22:43.908496Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'call_sYT3PFMufHGWJD32Hi2CTNUP'},\n",
" 'id': 'call_Ga9m8FAArIyEjItHmztPYA22',\n",
" 'type': 'tool_call'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York, NY'},\n",
" 'id': 'call_j1qjhxRnD3ffQmRyqjlI1Lnk'}]"
" 'id': 'call_jh2dEvBaAHRaw5JUDthOs7rt',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 7,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
@@ -288,22 +383,31 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "e57dfe9f-cd24-4e37-9ce9-ccf8daf78f89",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:43.912746Z",
"iopub.status.busy": "2024-09-10T20:22:43.912447Z",
"iopub.status.idle": "2024-09-10T20:22:46.437049Z",
"shell.execute_reply": "2024-09-10T20:22:46.436093Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'toolu_01CxEHxKtVbLBrvzFS7GQ5xR'},\n",
" 'id': 'toolu_01JMufPf4F4t2zLj7miFeqXp',\n",
" 'type': 'tool_call'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York City, NY'},\n",
" 'id': 'toolu_013A79qt5toWSsKunFBDZd5S'}]"
" 'id': 'toolu_01RQBHcE8kEEbYTuuS8WqY1u',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}

View File

@@ -71,7 +71,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"openai_response = llm.invoke(\"hello\")\n",
"openai_response.usage_metadata"
]
@@ -182,13 +182,13 @@
"content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-4o-mini'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
]
}
],
"source": [
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"aggregate = None\n",
"for chunk in llm.stream(\"hello\", stream_usage=True):\n",
@@ -252,7 +252,7 @@
"content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-4o-mini'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
]
}
],
@@ -289,7 +289,7 @@
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Joke(BaseModel):\n",
@@ -300,7 +300,7 @@
"\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" model=\"gpt-4o-mini\",\n",
" stream_usage=True,\n",
")\n",
"# Under the hood, .with_structured_output binds tools to the\n",
@@ -362,7 +362,7 @@
"from langchain_community.callbacks.manager import get_openai_callback\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" model=\"gpt-4o-mini\",\n",
" temperature=0,\n",
" stream_usage=True,\n",
")\n",

View File

@@ -77,7 +77,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")"
"chat = ChatOpenAI(model=\"gpt-4o-mini\")"
]
},
{
@@ -191,7 +191,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
]
},
"execution_count": 5,
@@ -312,7 +312,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
]
},
"execution_count": 8,
@@ -342,7 +342,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
]
},
"execution_count": 9,
@@ -421,7 +421,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
]
},
"execution_count": 22,
@@ -501,7 +501,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
]
},
"execution_count": 24,
@@ -529,9 +529,9 @@
" HumanMessage(content='How are you today?'),\n",
" AIMessage(content='Fine thanks!'),\n",
" HumanMessage(content=\"What's my name?\"),\n",
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
" HumanMessage(content='Where does P. Sherman live?'),\n",
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
]
},
"execution_count": 25,
@@ -565,7 +565,7 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
]
},
"execution_count": 27,

View File

@@ -71,7 +71,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0.2)"
"chat = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.2)"
]
},
{

View File

@@ -70,7 +70,7 @@
"\n",
"# Choose the LLM that will drive the agent\n",
"# Only certain models support this\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)"
"chat = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{

View File

@@ -58,7 +58,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@@ -281,7 +282,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
]
},
{

View File

@@ -258,7 +258,7 @@
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"\n",
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
"compression_retriever = ContextualCompressionRetriever(\n",

View File

@@ -190,7 +190,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GSchema(BaseModel):\n",
@@ -285,7 +285,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
@@ -362,11 +362,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"----\n"
]
}
@@ -497,11 +497,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"----\n"
]
}

View File

@@ -13,7 +13,7 @@
"|---------------|---------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
"| name | str | Must be unique within a set of tools provided to an LLM or agent. |\n",
"| description | str | Describes what the tool does. Used as context by the LLM or agent. |\n",
"| args_schema | langchain.pydantic_v1.BaseModel | Optional but recommended, and required if using callback handlers. It can be used to provide more information (e.g., few-shot examples) or validation for expected parameters. |\n",
"| args_schema | pydantic.BaseModel | Optional but recommended, and required if using callback handlers. It can be used to provide more information (e.g., few-shot examples) or validation for expected parameters. |\n",
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
"\n",
"LangChain supports the creation of tools from:\n",
@@ -48,7 +48,14 @@
"cell_type": "code",
"execution_count": 1,
"id": "cc7005cd-072f-4d37-8453-6297468e5192",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:52.645451Z",
"iopub.status.busy": "2024-09-10T20:25:52.645081Z",
"iopub.status.idle": "2024-09-10T20:25:53.030958Z",
"shell.execute_reply": "2024-09-10T20:25:53.030669Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -88,7 +95,14 @@
"cell_type": "code",
"execution_count": 2,
"id": "0c0991db-b997-4611-be37-4346e660506b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.032544Z",
"iopub.status.busy": "2024-09-10T20:25:53.032420Z",
"iopub.status.idle": "2024-09-10T20:25:53.035349Z",
"shell.execute_reply": "2024-09-10T20:25:53.035123Z"
}
},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
@@ -112,22 +126,29 @@
"cell_type": "code",
"execution_count": 3,
"id": "5626423f-053e-4a66-adca-1d794d835397",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.036658Z",
"iopub.status.busy": "2024-09-10T20:25:53.036574Z",
"iopub.status.idle": "2024-09-10T20:25:53.041154Z",
"shell.execute_reply": "2024-09-10T20:25:53.040964Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'multiply_by_maxSchema',\n",
" 'description': 'Multiply a by the maximum of b.',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A',\n",
" 'description': 'scale factor',\n",
"{'description': 'Multiply a by the maximum of b.',\n",
" 'properties': {'a': {'description': 'scale factor',\n",
" 'title': 'A',\n",
" 'type': 'string'},\n",
" 'b': {'title': 'B',\n",
" 'description': 'list of ints over which to take maximum',\n",
" 'type': 'array',\n",
" 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
" 'b': {'description': 'list of ints over which to take maximum',\n",
" 'items': {'type': 'integer'},\n",
" 'title': 'B',\n",
" 'type': 'array'}},\n",
" 'required': ['a', 'b'],\n",
" 'title': 'multiply_by_maxSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 3,
@@ -163,7 +184,14 @@
"cell_type": "code",
"execution_count": 4,
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.042516Z",
"iopub.status.busy": "2024-09-10T20:25:53.042427Z",
"iopub.status.idle": "2024-09-10T20:25:53.045217Z",
"shell.execute_reply": "2024-09-10T20:25:53.045010Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -171,13 +199,13 @@
"text": [
"multiplication-tool\n",
"Multiply two numbers.\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n",
"True\n"
]
}
],
"source": [
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
@@ -218,19 +246,26 @@
"cell_type": "code",
"execution_count": 5,
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.046526Z",
"iopub.status.busy": "2024-09-10T20:25:53.046456Z",
"iopub.status.idle": "2024-09-10T20:25:53.050045Z",
"shell.execute_reply": "2024-09-10T20:25:53.049836Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'fooSchema',\n",
" 'description': 'The foo.',\n",
" 'type': 'object',\n",
" 'properties': {'bar': {'title': 'Bar',\n",
" 'description': 'The bar.',\n",
"{'description': 'The foo.',\n",
" 'properties': {'bar': {'description': 'The bar.',\n",
" 'title': 'Bar',\n",
" 'type': 'string'},\n",
" 'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
" 'required': ['bar', 'baz']}"
" 'baz': {'description': 'The baz.', 'title': 'Baz', 'type': 'integer'}},\n",
" 'required': ['bar', 'baz'],\n",
" 'title': 'fooSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 5,
@@ -277,7 +312,14 @@
"cell_type": "code",
"execution_count": 6,
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.051302Z",
"iopub.status.busy": "2024-09-10T20:25:53.051218Z",
"iopub.status.idle": "2024-09-10T20:25:53.059704Z",
"shell.execute_reply": "2024-09-10T20:25:53.059490Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -320,7 +362,14 @@
"cell_type": "code",
"execution_count": 7,
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.060971Z",
"iopub.status.busy": "2024-09-10T20:25:53.060883Z",
"iopub.status.idle": "2024-09-10T20:25:53.064615Z",
"shell.execute_reply": "2024-09-10T20:25:53.064408Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -329,7 +378,7 @@
"6\n",
"Calculator\n",
"multiply numbers\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n"
]
}
],
@@ -373,17 +422,32 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.065797Z",
"iopub.status.busy": "2024-09-10T20:25:53.065733Z",
"iopub.status.idle": "2024-09-10T20:25:53.130458Z",
"shell.execute_reply": "2024-09-10T20:25:53.130229Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95770/2548361071.py:14: LangChainBetaWarning: This API is in beta and may change in the future.\n",
" as_tool = chain.as_tool(\n"
]
},
{
"data": {
"text/plain": [
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
]
},
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -428,19 +492,26 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"id": "1dad8f8e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.131904Z",
"iopub.status.busy": "2024-09-10T20:25:53.131803Z",
"iopub.status.idle": "2024-09-10T20:25:53.136797Z",
"shell.execute_reply": "2024-09-10T20:25:53.136563Z"
}
},
"outputs": [],
"source": [
"from typing import Optional, Type\n",
"\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
"from pydantic import BaseModel\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
@@ -448,9 +519,11 @@
" b: int = Field(description=\"second number\")\n",
"\n",
"\n",
"# Note: It's important that every field has type hints. BaseTool is a\n",
"# Pydantic class and not having type hints can lead to unexpected behavior.\n",
"class CustomCalculatorTool(BaseTool):\n",
" name = \"Calculator\"\n",
" description = \"useful for when you need to answer questions about math\"\n",
" name: str = \"Calculator\"\n",
" description: str = \"useful for when you need to answer questions about math\"\n",
" args_schema: Type[BaseModel] = CalculatorInput\n",
" return_direct: bool = True\n",
"\n",
@@ -477,9 +550,16 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 10,
"id": "bb551c33",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.138074Z",
"iopub.status.busy": "2024-09-10T20:25:53.138007Z",
"iopub.status.idle": "2024-09-10T20:25:53.141360Z",
"shell.execute_reply": "2024-09-10T20:25:53.141158Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -487,7 +567,7 @@
"text": [
"Calculator\n",
"useful for when you need to answer questions about math\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n",
"True\n",
"6\n",
"6\n"
@@ -528,9 +608,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 11,
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.142587Z",
"iopub.status.busy": "2024-09-10T20:25:53.142504Z",
"iopub.status.idle": "2024-09-10T20:25:53.147205Z",
"shell.execute_reply": "2024-09-10T20:25:53.146995Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -560,9 +647,16 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 12,
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.148383Z",
"iopub.status.busy": "2024-09-10T20:25:53.148307Z",
"iopub.status.idle": "2024-09-10T20:25:53.152684Z",
"shell.execute_reply": "2024-09-10T20:25:53.152486Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -605,9 +699,16 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 13,
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.153849Z",
"iopub.status.busy": "2024-09-10T20:25:53.153773Z",
"iopub.status.idle": "2024-09-10T20:25:53.158312Z",
"shell.execute_reply": "2024-09-10T20:25:53.158130Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -650,9 +751,16 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 14,
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.159440Z",
"iopub.status.busy": "2024-09-10T20:25:53.159364Z",
"iopub.status.idle": "2024-09-10T20:25:53.160922Z",
"shell.execute_reply": "2024-09-10T20:25:53.160712Z"
}
},
"outputs": [],
"source": [
"from langchain_core.tools import ToolException\n",
@@ -673,9 +781,16 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 15,
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.162046Z",
"iopub.status.busy": "2024-09-10T20:25:53.161968Z",
"iopub.status.idle": "2024-09-10T20:25:53.165236Z",
"shell.execute_reply": "2024-09-10T20:25:53.165052Z"
}
},
"outputs": [
{
"data": {
@@ -683,7 +798,7 @@
"'Error: There is no city by the name of foobar.'"
]
},
"execution_count": 16,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@@ -707,9 +822,16 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 16,
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.166372Z",
"iopub.status.busy": "2024-09-10T20:25:53.166294Z",
"iopub.status.idle": "2024-09-10T20:25:53.169739Z",
"shell.execute_reply": "2024-09-10T20:25:53.169553Z"
}
},
"outputs": [
{
"data": {
@@ -717,7 +839,7 @@
"\"There is no such city, but it's probably above 0K there!\""
]
},
"execution_count": 17,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -741,9 +863,16 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 17,
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.170937Z",
"iopub.status.busy": "2024-09-10T20:25:53.170859Z",
"iopub.status.idle": "2024-09-10T20:25:53.174498Z",
"shell.execute_reply": "2024-09-10T20:25:53.174304Z"
}
},
"outputs": [
{
"data": {
@@ -751,7 +880,7 @@
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
]
},
"execution_count": 18,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -791,9 +920,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 18,
"id": "14905425-0334-43a0-9de9-5bcf622ede0e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.175683Z",
"iopub.status.busy": "2024-09-10T20:25:53.175605Z",
"iopub.status.idle": "2024-09-10T20:25:53.178798Z",
"shell.execute_reply": "2024-09-10T20:25:53.178601Z"
}
},
"outputs": [],
"source": [
"import random\n",
@@ -820,9 +956,16 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 19,
"id": "0f2e1528-404b-46e6-b87c-f0957c4b9217",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.179881Z",
"iopub.status.busy": "2024-09-10T20:25:53.179807Z",
"iopub.status.idle": "2024-09-10T20:25:53.182100Z",
"shell.execute_reply": "2024-09-10T20:25:53.181940Z"
}
},
"outputs": [
{
"data": {
@@ -830,7 +973,7 @@
"'Successfully generated array of 10 random ints in [0, 9].'"
]
},
"execution_count": 9,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@@ -849,17 +992,24 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 20,
"id": "cc197777-26eb-46b3-a83b-c2ce116c6311",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.183238Z",
"iopub.status.busy": "2024-09-10T20:25:53.183170Z",
"iopub.status.idle": "2024-09-10T20:25:53.185752Z",
"shell.execute_reply": "2024-09-10T20:25:53.185567Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[1, 4, 2, 5, 3, 9, 0, 4, 7, 7])"
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[4, 8, 2, 4, 1, 0, 9, 5, 8, 1])"
]
},
"execution_count": 3,
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@@ -885,9 +1035,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 21,
"id": "fe1a09d1-378b-4b91-bb5e-0697c3d7eb92",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.186884Z",
"iopub.status.busy": "2024-09-10T20:25:53.186803Z",
"iopub.status.idle": "2024-09-10T20:25:53.190718Z",
"shell.execute_reply": "2024-09-10T20:25:53.190494Z"
}
},
"outputs": [],
"source": [
"from langchain_core.tools import BaseTool\n",
@@ -917,17 +1074,24 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 22,
"id": "8c3d16f6-1c4a-48ab-b05a-38547c592e79",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.191872Z",
"iopub.status.busy": "2024-09-10T20:25:53.191794Z",
"iopub.status.idle": "2024-09-10T20:25:53.194396Z",
"shell.execute_reply": "2024-09-10T20:25:53.194184Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.4277, 0.7578, 2.4871])"
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.5566, 0.5134, 2.7914])"
]
},
"execution_count": 8,
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}

View File

@@ -90,7 +90,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
]
},
{

View File

@@ -29,9 +29,16 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "89579144-bcb3-490a-8036-86a0a6bcd56b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:41.780410Z",
"iopub.status.busy": "2024-09-10T20:26:41.780102Z",
"iopub.status.idle": "2024-09-10T20:26:42.147112Z",
"shell.execute_reply": "2024-09-10T20:26:42.146838Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
@@ -67,17 +74,24 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "610c3025-ea63-4cd7-88bd-c8cbcb4d8a3f",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.148746Z",
"iopub.status.busy": "2024-09-10T20:26:42.148621Z",
"iopub.status.idle": "2024-09-10T20:26:42.162044Z",
"shell.execute_reply": "2024-09-10T20:26:42.161794Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptValue(messages=[SystemMessage(content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"), HumanMessage(content='testing 1 2 3'), HumanMessage(content='this is some text')])"
"ChatPromptValue(messages=[SystemMessage(content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\", additional_kwargs={}, response_metadata={}), HumanMessage(content='testing 1 2 3', additional_kwargs={}, response_metadata={}), HumanMessage(content='this is some text', additional_kwargs={}, response_metadata={})])"
]
},
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -104,15 +118,22 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "d875a49a-d2cb-4b9e-b5bf-41073bc3905c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.163477Z",
"iopub.status.busy": "2024-09-10T20:26:42.163391Z",
"iopub.status.idle": "2024-09-10T20:26:42.324449Z",
"shell.execute_reply": "2024-09-10T20:26:42.324206Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@@ -162,9 +183,16 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "08356810-77ce-4e68-99d9-faa0326f2cee",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.326100Z",
"iopub.status.busy": "2024-09-10T20:26:42.326016Z",
"iopub.status.idle": "2024-09-10T20:26:42.329260Z",
"shell.execute_reply": "2024-09-10T20:26:42.329014Z"
}
},
"outputs": [],
"source": [
"import uuid\n",
@@ -177,7 +205,7 @@
" SystemMessage,\n",
" ToolMessage,\n",
")\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Example(TypedDict):\n",
@@ -238,9 +266,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "7f59a745-5c81-4011-a4c5-a33ec1eca7ef",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.330580Z",
"iopub.status.busy": "2024-09-10T20:26:42.330488Z",
"iopub.status.idle": "2024-09-10T20:26:42.332813Z",
"shell.execute_reply": "2024-09-10T20:26:42.332598Z"
}
},
"outputs": [],
"source": [
"examples = [\n",
@@ -273,22 +308,29 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "976bb7b8-09c4-4a3e-80df-49a483705c08",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.333955Z",
"iopub.status.busy": "2024-09-10T20:26:42.333876Z",
"iopub.status.idle": "2024-09-10T20:26:42.336841Z",
"shell.execute_reply": "2024-09-10T20:26:42.336635Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"system: content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"\n",
"human: content=\"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\"\n",
"ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': None, 'hair_color': None, 'height_in_meters': None}, 'id': 'b843ba77-4c9c-48ef-92a4-54e534f24521'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='b843ba77-4c9c-48ef-92a4-54e534f24521'\n",
"human: content='Fiona traveled far from France to Spain.'\n",
"ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}, 'id': '46f00d6b-50e5-4482-9406-b07bb10340f6'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='46f00d6b-50e5-4482-9406-b07bb10340f6'\n",
"human: content='this is some text'\n"
"system: content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\" additional_kwargs={} response_metadata={}\n",
"human: content=\"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\" additional_kwargs={} response_metadata={}\n",
"ai: content='' additional_kwargs={} response_metadata={} tool_calls=[{'name': 'Data', 'args': {'people': []}, 'id': '240159b1-1405-4107-a07c-3c6b91b3d5b7', 'type': 'tool_call'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='240159b1-1405-4107-a07c-3c6b91b3d5b7'\n",
"human: content='Fiona traveled far from France to Spain.' additional_kwargs={} response_metadata={}\n",
"ai: content='' additional_kwargs={} response_metadata={} tool_calls=[{'name': 'Data', 'args': {'people': [{'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}]}, 'id': '3fc521e4-d1d2-4c20-bf40-e3d72f1068da', 'type': 'tool_call'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='3fc521e4-d1d2-4c20-bf40-e3d72f1068da'\n",
"human: content='this is some text' additional_kwargs={} response_metadata={}\n"
]
}
],
@@ -320,9 +362,16 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "df2e1ee1-69e8-4c4d-b349-95f2e320317b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.338001Z",
"iopub.status.busy": "2024-09-10T20:26:42.337915Z",
"iopub.status.idle": "2024-09-10T20:26:42.349121Z",
"shell.execute_reply": "2024-09-10T20:26:42.348908Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@@ -343,9 +392,16 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "dbfea43d-769b-42e9-a76f-ce722f7d6f93",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.350335Z",
"iopub.status.busy": "2024-09-10T20:26:42.350264Z",
"iopub.status.idle": "2024-09-10T20:26:42.424894Z",
"shell.execute_reply": "2024-09-10T20:26:42.424623Z"
}
},
"outputs": [],
"source": [
"runnable = prompt | llm.with_structured_output(\n",
@@ -367,18 +423,49 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"id": "66545cab-af2a-40a4-9dc9-b4110458b7d3",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.426258Z",
"iopub.status.busy": "2024-09-10T20:26:42.426187Z",
"iopub.status.idle": "2024-09-10T20:26:46.151633Z",
"shell.execute_reply": "2024-09-10T20:26:46.150690Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[]\n",
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
}
@@ -401,18 +488,49 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 10,
"id": "1c09d805-ec16-4123-aef9-6a5b59499b5c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:46.155346Z",
"iopub.status.busy": "2024-09-10T20:26:46.155110Z",
"iopub.status.idle": "2024-09-10T20:26:51.810359Z",
"shell.execute_reply": "2024-09-10T20:26:51.809636Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n",
"people=[]\n",
"people=[]\n",
"people=[]\n",
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
}
@@ -435,9 +553,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 11,
"id": "a9b7a762-1b75-4f9f-b9d9-6732dd05802c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:51.813309Z",
"iopub.status.busy": "2024-09-10T20:26:51.813150Z",
"iopub.status.idle": "2024-09-10T20:26:53.474153Z",
"shell.execute_reply": "2024-09-10T20:26:53.473522Z"
}
},
"outputs": [
{
"data": {
@@ -445,7 +570,7 @@
"Data(people=[Person(name='Harrison', hair_color='black', height_in_meters=None)])"
]
},
"execution_count": 12,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -476,7 +601,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -23,16 +23,56 @@
"id": "57969139-ad0a-487e-97d8-cb30e2af9742",
"metadata": {},
"source": [
"## Set up\n",
"## Setup\n",
"\n",
"We need some example data! Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html)."
"First we'll install the dependencies needed for this guide:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "84460db2-36e1-4037-bfa6-2a11883c2ba5",
"id": "a3b4d838-5be4-4207-8a4a-9ef5624c48f2",
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:19.850767Z",
"iopub.status.busy": "2024-09-10T20:35:19.850427Z",
"iopub.status.idle": "2024-09-10T20:35:21.432233Z",
"shell.execute_reply": "2024-09-10T20:35:21.431606Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-community lxml faiss-cpu langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "ac000b03-33fc-414f-8f2c-3850df621a35",
"metadata": {},
"source": [
"Now we need some example data! Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "84460db2-36e1-4037-bfa6-2a11883c2ba5",
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:21.434882Z",
"iopub.status.busy": "2024-09-10T20:35:21.434571Z",
"iopub.status.idle": "2024-09-10T20:35:22.214545Z",
"shell.execute_reply": "2024-09-10T20:35:22.214253Z"
}
},
"outputs": [],
"source": [
"import re\n",
@@ -55,15 +95,22 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "fcb6917b-123d-4630-a0ce-ed8b293d482d",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.216143Z",
"iopub.status.busy": "2024-09-10T20:35:22.216039Z",
"iopub.status.idle": "2024-09-10T20:35:22.218117Z",
"shell.execute_reply": "2024-09-10T20:35:22.217854Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"79174\n"
"80427\n"
]
}
],
@@ -87,13 +134,20 @@
"cell_type": "code",
"execution_count": 4,
"id": "a3b288ed-87a6-4af0-aac8-20921dc370d4",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.219468Z",
"iopub.status.busy": "2024-09-10T20:35:22.219395Z",
"iopub.status.idle": "2024-09-10T20:35:22.340594Z",
"shell.execute_reply": "2024-09-10T20:35:22.340319Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class KeyDevelopment(BaseModel):\n",
@@ -156,7 +210,14 @@
"cell_type": "code",
"execution_count": 5,
"id": "109f4f05-d0ff-431d-93d9-8f5aa34979a6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.342277Z",
"iopub.status.busy": "2024-09-10T20:35:22.342171Z",
"iopub.status.idle": "2024-09-10T20:35:22.532302Z",
"shell.execute_reply": "2024-09-10T20:35:22.532034Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@@ -171,7 +232,14 @@
"cell_type": "code",
"execution_count": 6,
"id": "aa4ae224-6d3d-4fe2-b210-7db19a9fe580",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.533795Z",
"iopub.status.busy": "2024-09-10T20:35:22.533708Z",
"iopub.status.idle": "2024-09-10T20:35:22.610573Z",
"shell.execute_reply": "2024-09-10T20:35:22.610307Z"
}
},
"outputs": [],
"source": [
"extractor = prompt | llm.with_structured_output(\n",
@@ -194,7 +262,14 @@
"cell_type": "code",
"execution_count": 7,
"id": "27b8a373-14b3-45ea-8bf5-9749122ad927",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.612123Z",
"iopub.status.busy": "2024-09-10T20:35:22.612052Z",
"iopub.status.idle": "2024-09-10T20:35:22.753493Z",
"shell.execute_reply": "2024-09-10T20:35:22.753179Z"
}
},
"outputs": [],
"source": [
"from langchain_text_splitters import TokenTextSplitter\n",
@@ -227,7 +302,14 @@
"cell_type": "code",
"execution_count": 8,
"id": "6ba766b5-8d6c-48e6-8d69-f391a66b65d2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.755067Z",
"iopub.status.busy": "2024-09-10T20:35:22.754987Z",
"iopub.status.idle": "2024-09-10T20:35:36.691130Z",
"shell.execute_reply": "2024-09-10T20:35:36.690500Z"
}
},
"outputs": [],
"source": [
"# Limit just to the first 3 chunks\n",
@@ -254,21 +336,27 @@
"cell_type": "code",
"execution_count": 9,
"id": "c3f77470-ce6c-477f-8957-650913218632",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:36.694799Z",
"iopub.status.busy": "2024-09-10T20:35:36.694458Z",
"iopub.status.idle": "2024-09-10T20:35:36.701416Z",
"shell.execute_reply": "2024-09-10T20:35:36.700993Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[KeyDevelopment(year=1966, description='The Toyota Corolla began production, becoming the best-selling series of automobile in history.', evidence='The Toyota Corolla, which has been in production since 1966, is the best-selling series of automobile in history.'),\n",
" KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first steam-powered road vehicle.', evidence='The French inventor Nicolas-Joseph Cugnot built the first steam-powered road vehicle in 1769.'),\n",
" KeyDevelopment(year=1808, description='François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile.', evidence='the Swiss inventor François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile in 1808.'),\n",
" KeyDevelopment(year=1886, description='Carl Benz patented his Benz Patent-Motorwagen, inventing the modern car.', evidence='The modern car—a practical, marketable automobile for everyday use—was invented in 1886, when the German inventor Carl Benz patented his Benz Patent-Motorwagen.'),\n",
" KeyDevelopment(year=1908, description='Ford Model T, one of the first cars affordable by the masses, began production.', evidence='One of the first cars affordable by the masses was the Ford Model T, begun in 1908, an American car manufactured by the Ford Motor Company.'),\n",
" KeyDevelopment(year=1888, description=\"Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.\", evidence=\"In August 1888, Bertha Benz, the wife of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention.\"),\n",
"[KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first full-scale, self-propelled mechanical vehicle, a steam-powered tricycle.', evidence='Nicolas-Joseph Cugnot is widely credited with building the first full-scale, self-propelled mechanical vehicle in about 1769; he created a steam-powered tricycle.'),\n",
" KeyDevelopment(year=1807, description=\"Nicéphore Niépce and his brother Claude created what was probably the world's first internal combustion engine.\", evidence=\"In 1807, Nicéphore Niépce and his brother Claude created what was probably the world's first internal combustion engine (which they called a Pyréolophore), but installed it in a boat on the river Saone in France.\"),\n",
" KeyDevelopment(year=1886, description='Carl Benz patented the Benz Patent-Motorwagen, marking the birth of the modern car.', evidence='In November 1881, French inventor Gustave Trouvé demonstrated a three-wheeled car powered by electricity at the International Exposition of Electricity. Although several other German engineers (including Gottlieb Daimler, Wilhelm Maybach, and Siegfried Marcus) were working on cars at about the same time, the year 1886 is regarded as the birth year of the modern car—a practical, marketable automobile for everyday use—when the German Carl Benz patented his Benz Patent-Motorwagen; he is generally acknowledged as the inventor of the car.'),\n",
" KeyDevelopment(year=1886, description='Carl Benz began promotion of his vehicle, marking the introduction of the first commercially available automobile.', evidence='Benz began promotion of the vehicle on 3 July 1886.'),\n",
" KeyDevelopment(year=1888, description=\"Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.\", evidence=\"In August 1888, Bertha Benz, the wife and business partner of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention.\"),\n",
" KeyDevelopment(year=1896, description='Benz designed and patented the first internal-combustion flat engine, called boxermotor.', evidence='In 1896, Benz designed and patented the first internal-combustion flat engine, called boxermotor.'),\n",
" KeyDevelopment(year=1897, description='Nesselsdorfer Wagenbau produced the Präsident automobil, one of the first factory-made cars in the world.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'),\n",
" KeyDevelopment(year=1890, description='Daimler Motoren Gesellschaft (DMG) was founded by Daimler and Maybach in Cannstatt.', evidence='Daimler and Maybach founded Daimler Motoren Gesellschaft (DMG) in Cannstatt in 1890.'),\n",
" KeyDevelopment(year=1891, description='Auguste Doriot and Louis Rigoulot completed the longest trip by a petrol-driven vehicle with a Daimler powered Peugeot Type 3.', evidence='In 1891, Auguste Doriot and his Peugeot colleague Louis Rigoulot completed the longest trip by a petrol-driven vehicle when their self-designed and built Daimler powered Peugeot Type 3 completed 2,100 kilometres (1,300 mi) from Valentigney to Paris and Brest and back again.')]"
" KeyDevelopment(year=1897, description='The first motor car in central Europe and one of the first factory-made cars in the world, the Präsident automobil, was produced by Nesselsdorfer Wagenbau.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'),\n",
" KeyDevelopment(year=1901, description='Ransom Olds started large-scale, production-line manufacturing of affordable cars at his Oldsmobile factory in Lansing, Michigan.', evidence='Large-scale, production-line manufacturing of affordable cars was started by Ransom Olds in 1901 at his Oldsmobile factory in Lansing, Michigan.'),\n",
" KeyDevelopment(year=1913, description=\"Henry Ford introduced the world's first moving assembly line for cars at the Highland Park Ford Plant.\", evidence=\"This concept was greatly expanded by Henry Ford, beginning in 1913 with the world's first moving assembly line for cars at the Highland Park Ford Plant.\")]"
]
},
"execution_count": 9,
@@ -315,7 +403,14 @@
"cell_type": "code",
"execution_count": 10,
"id": "aaf37c82-625b-4fa1-8e88-73303f08ac16",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:36.703897Z",
"iopub.status.busy": "2024-09-10T20:35:36.703718Z",
"iopub.status.idle": "2024-09-10T20:35:38.451523Z",
"shell.execute_reply": "2024-09-10T20:35:38.450925Z"
}
},
"outputs": [],
"source": [
"from langchain_community.vectorstores import FAISS\n",
@@ -344,7 +439,14 @@
"cell_type": "code",
"execution_count": 11,
"id": "47aad00b-7013-4f7f-a1b0-02ef269093bf",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:38.455094Z",
"iopub.status.busy": "2024-09-10T20:35:38.454851Z",
"iopub.status.idle": "2024-09-10T20:35:38.458315Z",
"shell.execute_reply": "2024-09-10T20:35:38.457940Z"
}
},
"outputs": [],
"source": [
"rag_extractor = {\n",
@@ -356,7 +458,14 @@
"cell_type": "code",
"execution_count": 12,
"id": "68f2de01-0cd8-456e-a959-db236189d41b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:38.460115Z",
"iopub.status.busy": "2024-09-10T20:35:38.459949Z",
"iopub.status.idle": "2024-09-10T20:35:43.195532Z",
"shell.execute_reply": "2024-09-10T20:35:43.194254Z"
}
},
"outputs": [],
"source": [
"results = rag_extractor.invoke(\"Key developments associated with cars\")"
@@ -366,15 +475,21 @@
"cell_type": "code",
"execution_count": 13,
"id": "1788e2d6-77bb-417f-827c-eb96c035164e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:43.200497Z",
"iopub.status.busy": "2024-09-10T20:35:43.200037Z",
"iopub.status.idle": "2024-09-10T20:35:43.206773Z",
"shell.execute_reply": "2024-09-10T20:35:43.205426Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"year=1869 description='Mary Ward became one of the first documented car fatalities in Parsonstown, Ireland.' evidence='Mary Ward became one of the first documented car fatalities in 1869 in Parsonstown, Ireland,'\n",
"year=1899 description=\"Henry Bliss one of the US's first pedestrian car casualties in New York City.\" evidence=\"Henry Bliss one of the US's first pedestrian car casualties in 1899 in New York City.\"\n",
"year=2030 description='All fossil fuel vehicles will be banned in Amsterdam.' evidence='all fossil fuel vehicles will be banned in Amsterdam from 2030.'\n"
"year=2006 description='Car-sharing services in the US experienced double-digit growth in revenue and membership.' evidence='in the US, some car-sharing services have experienced double-digit growth in revenue and membership growth between 2006 and 2007.'\n",
"year=2020 description='56 million cars were manufactured worldwide, with China producing the most.' evidence='In 2020, there were 56 million cars manufactured worldwide, down from 67 million the previous year. The automotive industry in China produces by far the most (20 million in 2020).'\n"
]
}
],
@@ -416,7 +531,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -27,9 +27,16 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "25487939-8713-4ec7-b774-e4a761ac8298",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.442501Z",
"iopub.status.busy": "2024-09-10T20:35:44.442044Z",
"iopub.status.idle": "2024-09-10T20:35:44.872217Z",
"shell.execute_reply": "2024-09-10T20:35:44.871897Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@@ -62,16 +69,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "497eb023-c043-443d-ac62-2d4ea85fe1b0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.873979Z",
"iopub.status.busy": "2024-09-10T20:35:44.873840Z",
"iopub.status.idle": "2024-09-10T20:35:44.878966Z",
"shell.execute_reply": "2024-09-10T20:35:44.878718Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from pydantic import BaseModel, Field, validator\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@@ -114,9 +128,16 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "20b99ffb-a114-49a9-a7be-154c525f8ada",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.880355Z",
"iopub.status.busy": "2024-09-10T20:35:44.880277Z",
"iopub.status.idle": "2024-09-10T20:35:44.881834Z",
"shell.execute_reply": "2024-09-10T20:35:44.881601Z"
}
},
"outputs": [],
"source": [
"query = \"Anna is 23 years old and she is 6 feet tall\""
@@ -124,9 +145,16 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "4f3a66ce-de19-4571-9e54-67504ae3fba7",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.883138Z",
"iopub.status.busy": "2024-09-10T20:35:44.883049Z",
"iopub.status.idle": "2024-09-10T20:35:44.885139Z",
"shell.execute_reply": "2024-09-10T20:35:44.884801Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -140,7 +168,7 @@
"\n",
"Here is the output schema:\n",
"```\n",
"{\"description\": \"Identifying information about all people in a text.\", \"properties\": {\"people\": {\"title\": \"People\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/Person\"}}}, \"required\": [\"people\"], \"definitions\": {\"Person\": {\"title\": \"Person\", \"description\": \"Information about a person.\", \"type\": \"object\", \"properties\": {\"name\": {\"title\": \"Name\", \"description\": \"The name of the person\", \"type\": \"string\"}, \"height_in_meters\": {\"title\": \"Height In Meters\", \"description\": \"The height of the person expressed in meters.\", \"type\": \"number\"}}, \"required\": [\"name\", \"height_in_meters\"]}}}\n",
"{\"$defs\": {\"Person\": {\"description\": \"Information about a person.\", \"properties\": {\"name\": {\"description\": \"The name of the person\", \"title\": \"Name\", \"type\": \"string\"}, \"height_in_meters\": {\"description\": \"The height of the person expressed in meters.\", \"title\": \"Height In Meters\", \"type\": \"number\"}}, \"required\": [\"name\", \"height_in_meters\"], \"title\": \"Person\", \"type\": \"object\"}}, \"description\": \"Identifying information about all people in a text.\", \"properties\": {\"people\": {\"items\": {\"$ref\": \"#/$defs/Person\"}, \"title\": \"People\", \"type\": \"array\"}}, \"required\": [\"people\"]}\n",
"```\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
@@ -160,9 +188,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "7e0041eb-37dc-4384-9fe3-6dd8c356371e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.886765Z",
"iopub.status.busy": "2024-09-10T20:35:44.886675Z",
"iopub.status.idle": "2024-09-10T20:35:46.835960Z",
"shell.execute_reply": "2024-09-10T20:35:46.835282Z"
}
},
"outputs": [
{
"data": {
@@ -170,7 +205,7 @@
"People(people=[Person(name='Anna', height_in_meters=1.83)])"
]
},
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -209,9 +244,16 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "b1f11912-c1bb-4a2a-a482-79bf3996961f",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.839577Z",
"iopub.status.busy": "2024-09-10T20:35:46.839233Z",
"iopub.status.idle": "2024-09-10T20:35:46.849663Z",
"shell.execute_reply": "2024-09-10T20:35:46.849177Z"
}
},
"outputs": [],
"source": [
"import json\n",
@@ -221,7 +263,7 @@
"from langchain_anthropic.chat_models import ChatAnthropic\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from pydantic import BaseModel, Field, validator\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@@ -279,16 +321,23 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "9260d5e8-3b6c-4639-9f3b-fb2f90239e4b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.851870Z",
"iopub.status.busy": "2024-09-10T20:35:46.851698Z",
"iopub.status.idle": "2024-09-10T20:35:46.854786Z",
"shell.execute_reply": "2024-09-10T20:35:46.854424Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"System: Answer the user query. Output your answer as JSON that matches the given schema: ```json\n",
"{'title': 'People', 'description': 'Identifying information about all people in a text.', 'type': 'object', 'properties': {'people': {'title': 'People', 'type': 'array', 'items': {'$ref': '#/definitions/Person'}}}, 'required': ['people'], 'definitions': {'Person': {'title': 'Person', 'description': 'Information about a person.', 'type': 'object', 'properties': {'name': {'title': 'Name', 'description': 'The name of the person', 'type': 'string'}, 'height_in_meters': {'title': 'Height In Meters', 'description': 'The height of the person expressed in meters.', 'type': 'number'}}, 'required': ['name', 'height_in_meters']}}}\n",
"{'$defs': {'Person': {'description': 'Information about a person.', 'properties': {'name': {'description': 'The name of the person', 'title': 'Name', 'type': 'string'}, 'height_in_meters': {'description': 'The height of the person expressed in meters.', 'title': 'Height In Meters', 'type': 'number'}}, 'required': ['name', 'height_in_meters'], 'title': 'Person', 'type': 'object'}}, 'description': 'Identifying information about all people in a text.', 'properties': {'people': {'items': {'$ref': '#/$defs/Person'}, 'title': 'People', 'type': 'array'}}, 'required': ['people'], 'title': 'People', 'type': 'object'}\n",
"```. Make sure to wrap the answer in ```json and ``` tags\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
@@ -301,17 +350,32 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "c523301d-ae0e-45e3-b195-7fd28c67a5c4",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.856945Z",
"iopub.status.busy": "2024-09-10T20:35:46.856769Z",
"iopub.status.idle": "2024-09-10T20:35:48.373728Z",
"shell.execute_reply": "2024-09-10T20:35:48.373079Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/bagatur/langchain/.venv/lib/python3.11/site-packages/pydantic/_internal/_fields.py:201: UserWarning: Field name \"schema\" in \"PromptInput\" shadows an attribute in parent \"BaseModel\"\n",
" warnings.warn(\n"
]
},
{
"data": {
"text/plain": [
"[{'people': [{'name': 'Anna', 'height_in_meters': 1.83}]}]"
]
},
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -349,7 +413,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -90,7 +90,7 @@
"outputs": [],
"source": [
"# Note that we set max_retries = 0 to avoid retrying on RateLimits, etc\n",
"openai_llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", max_retries=0)\n",
"openai_llm = ChatOpenAI(model=\"gpt-4o-mini\", max_retries=0)\n",
"anthropic_llm = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
"llm = openai_llm.with_fallbacks([anthropic_llm])"
]

View File

@@ -66,7 +66,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@@ -86,7 +87,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='The expression \"2 🦜 9\" is not a standard mathematical operation or equation. It appears to be a combination of the number 2 and the parrot emoji 🦜 followed by the number 9. It does not have a specific mathematical meaning.', response_metadata={'token_usage': {'completion_tokens': 54, 'prompt_tokens': 17, 'total_tokens': 71}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-aad12dda-5c47-4a1e-9949-6fe94e03242a-0', usage_metadata={'input_tokens': 17, 'output_tokens': 54, 'total_tokens': 71})"
"AIMessage(content='The expression \"2 🦜 9\" is not a standard mathematical operation or equation. It appears to be a combination of the number 2 and the parrot emoji 🦜 followed by the number 9. It does not have a specific mathematical meaning.', response_metadata={'token_usage': {'completion_tokens': 54, 'prompt_tokens': 17, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-aad12dda-5c47-4a1e-9949-6fe94e03242a-0', usage_metadata={'input_tokens': 17, 'output_tokens': 54, 'total_tokens': 71})"
]
},
"execution_count": 4,
@@ -97,7 +98,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"model = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.0)\n",
"\n",
"model.invoke(\"What is 2 🦜 9?\")"
]
@@ -212,7 +213,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='11', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5ec4e051-262f-408e-ad00-3f2ebeb561c3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
"AIMessage(content='11', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5ec4e051-262f-408e-ad00-3f2ebeb561c3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
]
},
"execution_count": 8,
@@ -418,7 +419,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d1863e5e-17cd-4e9d-bf7a-b9f118747a65-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d1863e5e-17cd-4e9d-bf7a-b9f118747a65-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
]
},
"execution_count": 13,
@@ -427,7 +428,7 @@
}
],
"source": [
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"chain = final_prompt | ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.0)\n",
"\n",
"chain.invoke({\"input\": \"What's 3 🦜 3?\"})"
]

View File

@@ -136,7 +136,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Note that the docstrings here are crucial, as they will be passed along\n",
@@ -191,7 +191,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
@@ -696,7 +696,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -54,7 +54,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{

View File

@@ -163,8 +163,8 @@
"from typing import List, Optional\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",

View File

@@ -177,14 +177,15 @@
"source": [
"from typing import Optional, Type\n",
"\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"# Import things that are needed generically\n",
"from pydantic import BaseModel, Field\n",
"\n",
"description_query = \"\"\"\n",
"MATCH (m:Movie|Person)\n",
"WHERE m.title CONTAINS $candidate OR m.name CONTAINS $candidate\n",
@@ -226,14 +227,15 @@
"source": [
"from typing import Optional, Type\n",
"\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"# Import things that are needed generically\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class InformationInput(BaseModel):\n",
" entity: str = Field(description=\"movie or a person mentioned in the question\")\n",

View File

@@ -25,7 +25,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"# Please manually enter OpenAI Key"
]
},

View File

@@ -94,7 +94,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\").bind(logprobs=True)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\").bind(logprobs=True)\n",
"\n",
"msg = llm.invoke((\"human\", \"how are you today\"))\n",
"\n",

View File

@@ -11,12 +11,30 @@
"\n",
"The `merge_message_runs` utility makes it easy to merge consecutive messages of the same type.\n",
"\n",
"### Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "198ce37f-4466-45a2-8878-d75cd01a5d23",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-core langchain-anthropic"
]
},
{
"cell_type": "markdown",
"id": "b5c3ca6e-e5b3-4151-8307-9101713a20ae",
"metadata": {},
"source": [
"## Basic usage"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 8,
"id": "1a215bbb-c05c-40b0-a6fd-d94884d517df",
"metadata": {},
"outputs": [
@@ -24,11 +42,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\")\n",
"SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\", additional_kwargs={}, response_metadata={})\n",
"\n",
"HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'])\n",
"HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'], additional_kwargs={}, response_metadata={})\n",
"\n",
"AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')\n"
"AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!', additional_kwargs={}, response_metadata={})\n"
]
}
],
@@ -63,38 +81,6 @@
"Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character."
]
},
{
"cell_type": "markdown",
"id": "11f7e8d3",
"metadata": {},
"source": [
"The `merge_message_runs` utility also works with messages composed together using the overloaded `+` operation:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b51855c5",
"metadata": {},
"outputs": [],
"source": [
"messages = (\n",
" SystemMessage(\"you're a good assistant.\")\n",
" + SystemMessage(\"you always respond with a joke.\")\n",
" + HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}])\n",
" + HumanMessage(\"and who is harrison chasing anyways\")\n",
" + AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" )\n",
" + AIMessage(\n",
" \"Why, he's probably chasing after the last cup of coffee in the office!\"\n",
" )\n",
")\n",
"\n",
"merged = merge_message_runs(messages)\n",
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
]
},
{
"cell_type": "markdown",
"id": "1b2eee74-71c8-4168-b968-bca580c25d18",
@@ -107,23 +93,30 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 9,
"id": "6d5a0283-11f8-435b-b27b-7b18f7693592",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=[], response_metadata={'id': 'msg_01D6R8Naum57q8qBau9vLBUX', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-ac0c465b-b54f-4b8b-9295-e5951250d653-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})"
"AIMessage(content=[], additional_kwargs={}, response_metadata={'id': 'msg_01KNGUMTuzBVfwNouLDpUMwf', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-b908b198-9c24-450b-9749-9d4a8182937b-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})"
]
},
"execution_count": 3,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# pip install -U langchain-anthropic\n",
"%pip install -qU langchain-anthropic\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
@@ -146,19 +139,19 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 10,
"id": "460817a6-c327-429d-958e-181a8c46059c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\"),\n",
" HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways']),\n",
" AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')]"
"[SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
" HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'], additional_kwargs={}, response_metadata={}),\n",
" AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!', additional_kwargs={}, response_metadata={})]"
]
},
"execution_count": 4,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -167,6 +160,53 @@
"merger.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "4178837d-b155-492d-9404-d567accc1fa0",
"metadata": {},
"source": [
"`merge_message_runs` can also be placed after a prompt:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "620530ab-ed05-4899-b984-bfa4cd738465",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='A convergent series is an infinite series whose partial sums approach a finite value as more terms are added. In other words, the sequence of partial sums has a limit.\\n\\nMore formally, an infinite series Σ an (where an are the terms of the series) is said to be convergent if the sequence of partial sums:\\n\\nS1 = a1\\nS2 = a1 + a2 \\nS3 = a1 + a2 + a3\\n...\\nSn = a1 + a2 + a3 + ... + an\\n...\\n\\nconverges to some finite number S as n goes to infinity. We write:\\n\\nlim n→∞ Sn = S\\n\\nThe finite number S is called the sum of the convergent infinite series.\\n\\nIf the sequence of partial sums does not approach any finite limit, the infinite series is said to be divergent.\\n\\nSome key properties:\\n- A series converges if and only if the sequence of its partial sums is a Cauchy sequence.\\n- Absolute/conditional convergence criteria help determine if a given series converges.\\n- Convergent series have many important applications in mathematics, physics, engineering etc.', additional_kwargs={}, response_metadata={'id': 'msg_01MfV6y2hep7ZNvDz24A36U4', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 267}}, id='run-9d925f58-021e-4bd0-94fc-f8f5e91010a4-0', usage_metadata={'input_tokens': 29, 'output_tokens': 267, 'total_tokens': 296})"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
" [\n",
" (\"system\", \"You're great a {skill}\"),\n",
" (\"system\", \"You're also great at explaining things\"),\n",
" (\"human\", \"{query}\"),\n",
" ]\n",
")\n",
"chain = prompt | merger | llm\n",
"chain.invoke({\"skill\": \"math\", \"query\": \"what's the definition of a convergent series\"})"
]
},
{
"cell_type": "markdown",
"id": "51ba533a-43c7-4e5f-bd91-a4ec23ceeb34",
"metadata": {},
"source": [
"LangSmith Trace: https://smith.langchain.com/public/432150b6-9909-40a7-8ae7-944b7e657438/r/f4ad5fb2-4d38-42a6-b780-25f62617d53f"
]
},
{
"cell_type": "markdown",
"id": "4548d916-ce21-4dc6-8f19-eedb8003ace6",
@@ -194,7 +234,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -65,9 +65,11 @@
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\""
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API key:\\n\")"
]
},
{
@@ -1325,7 +1327,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -440,7 +440,7 @@
"source": [
"from typing import List\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class HypotheticalQuestions(BaseModel):\n",

View File

@@ -24,8 +24,8 @@
"from typing import List\n",
"\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI"
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field"
]
},
{

View File

@@ -47,7 +47,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@@ -71,8 +72,8 @@
"source": [
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"model = ChatOpenAI(temperature=0)\n",
"\n",

View File

@@ -20,8 +20,8 @@
"from langchain.output_parsers import OutputFixingParser\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI, OpenAI"
"from langchain_openai import ChatOpenAI, OpenAI\n",
"from pydantic import BaseModel, Field"
]
},
{

View File

@@ -35,17 +35,17 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 1,
"id": "1594b2bf-2a6f-47bb-9a81-38930f8e606b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Joke(setup='Why did the chicken cross the road?', punchline='To get to the other side!')"
"Joke(setup='Why did the tomato turn red?', punchline='Because it saw the salad dressing!')"
]
},
"execution_count": 6,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@@ -53,8 +53,8 @@
"source": [
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from langchain_openai import OpenAI\n",
"from pydantic import BaseModel, Field, model_validator\n",
"\n",
"model = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", temperature=0.0)\n",
"\n",
@@ -65,11 +65,13 @@
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
"\n",
" # You can add custom validation logic easily with Pydantic.\n",
" @validator(\"setup\")\n",
" def question_ends_with_question_mark(cls, field):\n",
" if field[-1] != \"?\":\n",
" @model_validator(mode=\"before\")\n",
" @classmethod\n",
" def question_ends_with_question_mark(cls, values: dict) -> dict:\n",
" setup = values[\"setup\"]\n",
" if setup[-1] != \"?\":\n",
" raise ValueError(\"Badly formed question!\")\n",
" return field\n",
" return values\n",
"\n",
"\n",
"# Set up a parser + inject instructions into the prompt template.\n",
@@ -239,9 +241,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-311",
"language": "python",
"name": "python3"
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
@@ -253,7 +255,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -41,7 +41,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
]
},
{

View File

@@ -39,7 +39,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@@ -70,8 +71,8 @@
"source": [
"from langchain.output_parsers import YamlOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Define your desired data structure.\n",

View File

@@ -60,7 +60,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{

View File

@@ -46,7 +46,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{

View File

@@ -269,7 +269,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class CitedAnswer(BaseModel):\n",

View File

@@ -24,9 +24,16 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 1,
"id": "8ca446a0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:35.834087Z",
"iopub.status.busy": "2024-09-11T02:32:35.833763Z",
"iopub.status.idle": "2024-09-11T02:32:36.588973Z",
"shell.execute_reply": "2024-09-11T02:32:36.588677Z"
}
},
"outputs": [],
"source": [
"from typing import Optional\n",
@@ -40,7 +47,7 @@
")\n",
"from langchain_community.query_constructors.chroma import ChromaTranslator\n",
"from langchain_community.query_constructors.elasticsearch import ElasticsearchTranslator\n",
"from langchain_core.pydantic_v1 import BaseModel"
"from pydantic import BaseModel"
]
},
{
@@ -53,9 +60,16 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 2,
"id": "64055006",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.590665Z",
"iopub.status.busy": "2024-09-11T02:32:36.590527Z",
"iopub.status.idle": "2024-09-11T02:32:36.592985Z",
"shell.execute_reply": "2024-09-11T02:32:36.592763Z"
}
},
"outputs": [],
"source": [
"class Search(BaseModel):\n",
@@ -66,9 +80,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 3,
"id": "44eb6d98",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.594147Z",
"iopub.status.busy": "2024-09-11T02:32:36.594072Z",
"iopub.status.idle": "2024-09-11T02:32:36.595777Z",
"shell.execute_reply": "2024-09-11T02:32:36.595563Z"
}
},
"outputs": [],
"source": [
"search_query = Search(query=\"RAG\", start_year=2022, author=\"LangChain\")"
@@ -76,9 +97,16 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 4,
"id": "e8ba6705",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.596902Z",
"iopub.status.busy": "2024-09-11T02:32:36.596824Z",
"iopub.status.idle": "2024-09-11T02:32:36.598805Z",
"shell.execute_reply": "2024-09-11T02:32:36.598629Z"
}
},
"outputs": [],
"source": [
"def construct_comparisons(query: Search):\n",
@@ -104,9 +132,16 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 5,
"id": "6a79c9da",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.599989Z",
"iopub.status.busy": "2024-09-11T02:32:36.599909Z",
"iopub.status.idle": "2024-09-11T02:32:36.601521Z",
"shell.execute_reply": "2024-09-11T02:32:36.601306Z"
}
},
"outputs": [],
"source": [
"comparisons = construct_comparisons(search_query)"
@@ -114,9 +149,16 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 6,
"id": "2d0e9689",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.602688Z",
"iopub.status.busy": "2024-09-11T02:32:36.602603Z",
"iopub.status.idle": "2024-09-11T02:32:36.604171Z",
"shell.execute_reply": "2024-09-11T02:32:36.603981Z"
}
},
"outputs": [],
"source": [
"_filter = Operation(operator=Operator.AND, arguments=comparisons)"
@@ -124,9 +166,16 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 7,
"id": "e4c0b2ce",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.605267Z",
"iopub.status.busy": "2024-09-11T02:32:36.605190Z",
"iopub.status.idle": "2024-09-11T02:32:36.607993Z",
"shell.execute_reply": "2024-09-11T02:32:36.607796Z"
}
},
"outputs": [
{
"data": {
@@ -135,7 +184,7 @@
" {'term': {'metadata.author.keyword': 'LangChain'}}]}}"
]
},
"execution_count": 18,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -146,9 +195,16 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 8,
"id": "d75455ae",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.609091Z",
"iopub.status.busy": "2024-09-11T02:32:36.609012Z",
"iopub.status.idle": "2024-09-11T02:32:36.611075Z",
"shell.execute_reply": "2024-09-11T02:32:36.610869Z"
}
},
"outputs": [
{
"data": {
@@ -156,7 +212,7 @@
"{'$and': [{'start_year': {'$gt': 2022}}, {'author': {'$eq': 'LangChain'}}]}"
]
},
"execution_count": 19,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -182,7 +238,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -35,7 +35,14 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.329739Z",
"iopub.status.busy": "2024-09-11T02:33:48.329033Z",
"iopub.status.idle": "2024-09-11T02:33:48.334555Z",
"shell.execute_reply": "2024-09-11T02:33:48.334086Z"
}
},
"outputs": [],
"source": [
"# %pip install -qU langchain-core langchain-openai"
@@ -53,15 +60,23 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.337140Z",
"iopub.status.busy": "2024-09-11T02:33:48.336958Z",
"iopub.status.idle": "2024-09-11T02:33:48.342671Z",
"shell.execute_reply": "2024-09-11T02:33:48.342281Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@@ -80,14 +95,21 @@
},
{
"cell_type": "code",
"execution_count": 37,
"execution_count": 3,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.345004Z",
"iopub.status.busy": "2024-09-11T02:33:48.344838Z",
"iopub.status.idle": "2024-09-11T02:33:48.413166Z",
"shell.execute_reply": "2024-09-11T02:33:48.412908Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"sub_queries_description = \"\"\"\\\n",
"If the original question contains multiple distinct sub-questions, \\\n",
@@ -121,9 +143,16 @@
},
{
"cell_type": "code",
"execution_count": 64,
"execution_count": 4,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.414805Z",
"iopub.status.busy": "2024-09-11T02:33:48.414700Z",
"iopub.status.idle": "2024-09-11T02:33:49.023858Z",
"shell.execute_reply": "2024-09-11T02:33:49.023547Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
@@ -143,7 +172,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@@ -158,17 +187,24 @@
},
{
"cell_type": "code",
"execution_count": 65,
"execution_count": 5,
"id": "0bcfce06-6f0c-4f9d-a1fc-dc29342d2aae",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:49.025536Z",
"iopub.status.busy": "2024-09-11T02:33:49.025437Z",
"iopub.status.idle": "2024-09-11T02:33:50.170550Z",
"shell.execute_reply": "2024-09-11T02:33:50.169835Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='web voyager vs reflection agents', sub_queries=['difference between web voyager and reflection agents', 'do web voyager and reflection agents use langgraph'], publish_year=None)"
"Search(query='difference between web voyager and reflection agents', sub_queries=['what is web voyager', 'what are reflection agents', 'do both web voyager and reflection agents use langgraph?'], publish_year=None)"
]
},
"execution_count": 65,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -193,9 +229,16 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 6,
"id": "15b4923d-a08e-452d-8889-9a09a57d1095",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.180367Z",
"iopub.status.busy": "2024-09-11T02:33:50.173961Z",
"iopub.status.idle": "2024-09-11T02:33:50.186703Z",
"shell.execute_reply": "2024-09-11T02:33:50.186090Z"
}
},
"outputs": [],
"source": [
"examples = []"
@@ -203,9 +246,16 @@
},
{
"cell_type": "code",
"execution_count": 54,
"execution_count": 7,
"id": "da5330e6-827a-40e5-982b-b23b6286b758",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.189822Z",
"iopub.status.busy": "2024-09-11T02:33:50.189617Z",
"iopub.status.idle": "2024-09-11T02:33:50.195116Z",
"shell.execute_reply": "2024-09-11T02:33:50.194617Z"
}
},
"outputs": [],
"source": [
"question = \"What's chat langchain, is it a langchain template?\"\n",
@@ -218,9 +268,16 @@
},
{
"cell_type": "code",
"execution_count": 55,
"execution_count": 8,
"id": "580e857a-27df-4ecf-a19c-458dc9244ec8",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.198178Z",
"iopub.status.busy": "2024-09-11T02:33:50.198002Z",
"iopub.status.idle": "2024-09-11T02:33:50.204115Z",
"shell.execute_reply": "2024-09-11T02:33:50.202534Z"
}
},
"outputs": [],
"source": [
"question = \"How to build multi-agent system and stream intermediate steps from it\"\n",
@@ -238,9 +295,16 @@
},
{
"cell_type": "code",
"execution_count": 56,
"execution_count": 9,
"id": "fa63310d-69e3-4701-825c-fbb01f8a5a16",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.207416Z",
"iopub.status.busy": "2024-09-11T02:33:50.207196Z",
"iopub.status.idle": "2024-09-11T02:33:50.212484Z",
"shell.execute_reply": "2024-09-11T02:33:50.211974Z"
}
},
"outputs": [],
"source": [
"question = \"LangChain agents vs LangGraph?\"\n",
@@ -266,9 +330,16 @@
},
{
"cell_type": "code",
"execution_count": 57,
"execution_count": 10,
"id": "68b03709-9a60-4acf-b96c-cafe1056c6f3",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.215540Z",
"iopub.status.busy": "2024-09-11T02:33:50.215250Z",
"iopub.status.idle": "2024-09-11T02:33:50.224108Z",
"shell.execute_reply": "2024-09-11T02:33:50.223490Z"
}
},
"outputs": [],
"source": [
"import uuid\n",
@@ -313,9 +384,16 @@
},
{
"cell_type": "code",
"execution_count": 58,
"execution_count": 11,
"id": "d9bf9f87-3e6b-4fc2-957b-949b077fab54",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.227215Z",
"iopub.status.busy": "2024-09-11T02:33:50.226993Z",
"iopub.status.idle": "2024-09-11T02:33:50.231333Z",
"shell.execute_reply": "2024-09-11T02:33:50.230742Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import MessagesPlaceholder\n",
@@ -329,17 +407,24 @@
},
{
"cell_type": "code",
"execution_count": 62,
"execution_count": 12,
"id": "e565ccb0-3530-4782-b56b-d1f6d0a8e559",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.233833Z",
"iopub.status.busy": "2024-09-11T02:33:50.233646Z",
"iopub.status.idle": "2024-09-11T02:33:51.318133Z",
"shell.execute_reply": "2024-09-11T02:33:51.317640Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='Difference between web voyager and reflection agents, do they both use LangGraph?', sub_queries=['What is Web Voyager', 'What are Reflection agents', 'Do Web Voyager and Reflection agents use LangGraph'], publish_year=None)"
"Search(query=\"What's the difference between web voyager and reflection agents? Do both use langgraph?\", sub_queries=['What is web voyager', 'What are reflection agents', 'Do web voyager and reflection agents use langgraph?'], publish_year=None)"
]
},
"execution_count": 62,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -377,7 +462,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -33,12 +33,20 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai faker langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma"
]
},
{
@@ -53,15 +61,23 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.036110Z",
"iopub.status.busy": "2024-09-11T02:34:54.035829Z",
"iopub.status.idle": "2024-09-11T02:34:54.038746Z",
"shell.execute_reply": "2024-09-11T02:34:54.038430Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@@ -80,9 +96,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "e5ba65c2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.040738Z",
"iopub.status.busy": "2024-09-11T02:34:54.040515Z",
"iopub.status.idle": "2024-09-11T02:34:54.622643Z",
"shell.execute_reply": "2024-09-11T02:34:54.622382Z"
}
},
"outputs": [],
"source": [
"from faker import Faker\n",
@@ -102,17 +125,24 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "c901ea97",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.624195Z",
"iopub.status.busy": "2024-09-11T02:34:54.624106Z",
"iopub.status.idle": "2024-09-11T02:34:54.627231Z",
"shell.execute_reply": "2024-09-11T02:34:54.626971Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"'Hayley Gonzalez'"
"'Jacob Adams'"
]
},
"execution_count": 2,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -123,17 +153,24 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "b0d42ae2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.628545Z",
"iopub.status.busy": "2024-09-11T02:34:54.628460Z",
"iopub.status.idle": "2024-09-11T02:34:54.630474Z",
"shell.execute_reply": "2024-09-11T02:34:54.630282Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"'Jesse Knight'"
"'Eric Acevedo'"
]
},
"execution_count": 3,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -154,19 +191,33 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "0ae69afc",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.631758Z",
"iopub.status.busy": "2024-09-11T02:34:54.631678Z",
"iopub.status.idle": "2024-09-11T02:34:54.666448Z",
"shell.execute_reply": "2024-09-11T02:34:54.666216Z"
}
},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field"
"from pydantic import BaseModel, Field, model_validator"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "6c9485ce",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.667852Z",
"iopub.status.busy": "2024-09-11T02:34:54.667733Z",
"iopub.status.idle": "2024-09-11T02:34:54.700224Z",
"shell.execute_reply": "2024-09-11T02:34:54.700004Z"
}
},
"outputs": [],
"source": [
"class Search(BaseModel):\n",
@@ -176,19 +227,17 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "aebd704a",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change.\n",
" warn_beta(\n"
]
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.701556Z",
"iopub.status.busy": "2024-09-11T02:34:54.701465Z",
"iopub.status.idle": "2024-09-11T02:34:55.179986Z",
"shell.execute_reply": "2024-09-11T02:34:55.179640Z"
}
],
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
@@ -201,7 +250,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@@ -216,17 +265,24 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 9,
"id": "cc0d344b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:55.181603Z",
"iopub.status.busy": "2024-09-11T02:34:55.181500Z",
"iopub.status.idle": "2024-09-11T02:34:55.778884Z",
"shell.execute_reply": "2024-09-11T02:34:55.778324Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jesse Knight')"
"Search(query='aliens', author='Jesse Knight')"
]
},
"execution_count": 33,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -245,17 +301,24 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 10,
"id": "82b6b2ad",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:55.784266Z",
"iopub.status.busy": "2024-09-11T02:34:55.782603Z",
"iopub.status.idle": "2024-09-11T02:34:56.206779Z",
"shell.execute_reply": "2024-09-11T02:34:56.206068Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jess Knight')"
"Search(query='aliens', author='Jess Knight')"
]
},
"execution_count": 34,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -276,9 +339,16 @@
},
{
"cell_type": "code",
"execution_count": 35,
"execution_count": 11,
"id": "98788a94",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:56.210043Z",
"iopub.status.busy": "2024-09-11T02:34:56.209657Z",
"iopub.status.idle": "2024-09-11T02:34:56.213962Z",
"shell.execute_reply": "2024-09-11T02:34:56.213413Z"
}
},
"outputs": [],
"source": [
"system = \"\"\"Generate a relevant search query for a library system.\n",
@@ -299,9 +369,16 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 12,
"id": "e65412f5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:56.216144Z",
"iopub.status.busy": "2024-09-11T02:34:56.216005Z",
"iopub.status.idle": "2024-09-11T02:34:56.218754Z",
"shell.execute_reply": "2024-09-11T02:34:56.218416Z"
}
},
"outputs": [],
"source": [
"query_analyzer_all = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
@@ -317,18 +394,17 @@
},
{
"cell_type": "code",
"execution_count": 37,
"execution_count": 13,
"id": "696b000f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error code: 400 - {'error': {'message': \"This model's maximum context length is 16385 tokens. However, your messages resulted in 33885 tokens (33855 in the messages, 30 in the functions). Please reduce the length of the messages or functions.\", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}\n"
]
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:56.220827Z",
"iopub.status.busy": "2024-09-11T02:34:56.220680Z",
"iopub.status.idle": "2024-09-11T02:34:58.846872Z",
"shell.execute_reply": "2024-09-11T02:34:58.846273Z"
}
],
},
"outputs": [],
"source": [
"try:\n",
" res = query_analyzer_all.invoke(\"what are books about aliens by jess knight\")\n",
@@ -346,9 +422,16 @@
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 14,
"id": "0f0d0757",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:58.850318Z",
"iopub.status.busy": "2024-09-11T02:34:58.850100Z",
"iopub.status.idle": "2024-09-11T02:34:58.873883Z",
"shell.execute_reply": "2024-09-11T02:34:58.873525Z"
}
},
"outputs": [],
"source": [
"llm_long = ChatOpenAI(model=\"gpt-4-turbo-preview\", temperature=0)\n",
@@ -358,17 +441,24 @@
},
{
"cell_type": "code",
"execution_count": 39,
"execution_count": 15,
"id": "03e5b7b2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:58.875940Z",
"iopub.status.busy": "2024-09-11T02:34:58.875811Z",
"iopub.status.idle": "2024-09-11T02:35:02.947273Z",
"shell.execute_reply": "2024-09-11T02:35:02.946220Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='aliens', author='Kevin Knight')"
"Search(query='aliens', author='jess knight')"
]
},
"execution_count": 39,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@@ -389,9 +479,16 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 16,
"id": "32b19e07",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:02.951939Z",
"iopub.status.busy": "2024-09-11T02:35:02.951583Z",
"iopub.status.idle": "2024-09-11T02:35:41.777839Z",
"shell.execute_reply": "2024-09-11T02:35:41.777392Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@@ -403,9 +500,16 @@
},
{
"cell_type": "code",
"execution_count": 51,
"execution_count": 17,
"id": "774cb7b0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.780883Z",
"iopub.status.busy": "2024-09-11T02:35:41.780774Z",
"iopub.status.idle": "2024-09-11T02:35:41.782739Z",
"shell.execute_reply": "2024-09-11T02:35:41.782498Z"
}
},
"outputs": [],
"source": [
"def select_names(question):\n",
@@ -416,9 +520,16 @@
},
{
"cell_type": "code",
"execution_count": 52,
"execution_count": 18,
"id": "1173159c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.783992Z",
"iopub.status.busy": "2024-09-11T02:35:41.783913Z",
"iopub.status.idle": "2024-09-11T02:35:41.785911Z",
"shell.execute_reply": "2024-09-11T02:35:41.785632Z"
}
},
"outputs": [],
"source": [
"create_prompt = {\n",
@@ -429,9 +540,16 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 19,
"id": "0a892607",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.787082Z",
"iopub.status.busy": "2024-09-11T02:35:41.787008Z",
"iopub.status.idle": "2024-09-11T02:35:41.788543Z",
"shell.execute_reply": "2024-09-11T02:35:41.788362Z"
}
},
"outputs": [],
"source": [
"query_analyzer_select = create_prompt | structured_llm"
@@ -439,17 +557,24 @@
},
{
"cell_type": "code",
"execution_count": 54,
"execution_count": 20,
"id": "8195d7cd",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.789624Z",
"iopub.status.busy": "2024-09-11T02:35:41.789551Z",
"iopub.status.idle": "2024-09-11T02:35:42.099839Z",
"shell.execute_reply": "2024-09-11T02:35:42.099042Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptValue(messages=[SystemMessage(content='Generate a relevant search query for a library system.\\n\\n`author` attribute MUST be one of:\\n\\nJesse Knight, Kelly Knight, Scott Knight, Richard Knight, Andrew Knight, Katherine Knight, Erica Knight, Ashley Knight, Becky Knight, Kevin Knight\\n\\nDo NOT hallucinate author name!'), HumanMessage(content='what are books by jess knight')])"
"ChatPromptValue(messages=[SystemMessage(content='Generate a relevant search query for a library system.\\n\\n`author` attribute MUST be one of:\\n\\nJennifer Knight, Jill Knight, John Knight, Dr. Jeffrey Knight, Christopher Knight, Andrea Knight, Brandy Knight, Jennifer Keller, Becky Chambers, Sarah Knapp\\n\\nDo NOT hallucinate author name!'), HumanMessage(content='what are books by jess knight')])"
]
},
"execution_count": 54,
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@@ -460,17 +585,24 @@
},
{
"cell_type": "code",
"execution_count": 55,
"execution_count": 21,
"id": "d3228b4e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.106571Z",
"iopub.status.busy": "2024-09-11T02:35:42.105861Z",
"iopub.status.idle": "2024-09-11T02:35:42.909738Z",
"shell.execute_reply": "2024-09-11T02:35:42.908875Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jesse Knight')"
"Search(query='books about aliens', author='Jennifer Knight')"
]
},
"execution_count": 55,
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
@@ -492,28 +624,45 @@
},
{
"cell_type": "code",
"execution_count": 47,
"execution_count": 22,
"id": "a2e8b434",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.915376Z",
"iopub.status.busy": "2024-09-11T02:35:42.914923Z",
"iopub.status.idle": "2024-09-11T02:35:42.923958Z",
"shell.execute_reply": "2024-09-11T02:35:42.922391Z"
}
},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import validator\n",
"\n",
"\n",
"class Search(BaseModel):\n",
" query: str\n",
" author: str\n",
"\n",
" @validator(\"author\")\n",
" def double(cls, v: str) -> str:\n",
" return vectorstore.similarity_search(v, k=1)[0].page_content"
" @model_validator(mode=\"before\")\n",
" @classmethod\n",
" def double(cls, values: dict) -> dict:\n",
" author = values[\"author\"]\n",
" closest_valid_author = vectorstore.similarity_search(author, k=1)[\n",
" 0\n",
" ].page_content\n",
" values[\"author\"] = closest_valid_author\n",
" return values"
]
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 23,
"id": "919c0601",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.927718Z",
"iopub.status.busy": "2024-09-11T02:35:42.927428Z",
"iopub.status.idle": "2024-09-11T02:35:42.933784Z",
"shell.execute_reply": "2024-09-11T02:35:42.933344Z"
}
},
"outputs": [],
"source": [
"system = \"\"\"Generate a relevant search query for a library system\"\"\"\n",
@@ -531,17 +680,24 @@
},
{
"cell_type": "code",
"execution_count": 50,
"execution_count": 24,
"id": "6c4f3e9a",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.936506Z",
"iopub.status.busy": "2024-09-11T02:35:42.936186Z",
"iopub.status.idle": "2024-09-11T02:35:43.711754Z",
"shell.execute_reply": "2024-09-11T02:35:43.710695Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jesse Knight')"
"Search(query='aliens', author='John Knight')"
]
},
"execution_count": 50,
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
@@ -552,9 +708,16 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 25,
"id": "a309cb11",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:43.717567Z",
"iopub.status.busy": "2024-09-11T02:35:43.717189Z",
"iopub.status.idle": "2024-09-11T02:35:43.722339Z",
"shell.execute_reply": "2024-09-11T02:35:43.720537Z"
}
},
"outputs": [],
"source": [
"# TODO: show trigram similarity"
@@ -563,9 +726,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-311",
"language": "python",
"name": "python3"
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
@@ -577,7 +740,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -33,10 +33,25 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:53.160868Z",
"iopub.status.busy": "2024-09-11T02:41:53.160512Z",
"iopub.status.idle": "2024-09-11T02:41:57.605370Z",
"shell.execute_reply": "2024-09-11T02:41:57.604888Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai langchain-chroma"
]
},
{
@@ -51,15 +66,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:57.607874Z",
"iopub.status.busy": "2024-09-11T02:41:57.607697Z",
"iopub.status.idle": "2024-09-11T02:41:57.610422Z",
"shell.execute_reply": "2024-09-11T02:41:57.610012Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@@ -78,9 +101,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "1f621694",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:57.612276Z",
"iopub.status.busy": "2024-09-11T02:41:57.612146Z",
"iopub.status.idle": "2024-09-11T02:41:59.074590Z",
"shell.execute_reply": "2024-09-11T02:41:59.074052Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@@ -108,14 +138,21 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.077712Z",
"iopub.status.busy": "2024-09-11T02:41:59.077514Z",
"iopub.status.idle": "2024-09-11T02:41:59.081509Z",
"shell.execute_reply": "2024-09-11T02:41:59.081112Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Search(BaseModel):\n",
@@ -129,19 +166,17 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change.\n",
" warn_beta(\n"
]
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.083613Z",
"iopub.status.busy": "2024-09-11T02:41:59.083492Z",
"iopub.status.idle": "2024-09-11T02:41:59.204636Z",
"shell.execute_reply": "2024-09-11T02:41:59.204377Z"
}
],
},
"outputs": [],
"source": [
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
@@ -159,7 +194,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@@ -174,17 +209,24 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "bc1d3863",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.206178Z",
"iopub.status.busy": "2024-09-11T02:41:59.206101Z",
"iopub.status.idle": "2024-09-11T02:41:59.817758Z",
"shell.execute_reply": "2024-09-11T02:41:59.817310Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(queries=['Harrison work location'])"
"Search(queries=['Harrison Work', 'Harrison employment history'])"
]
},
"execution_count": 4,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -195,17 +237,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "af62af17-4f90-4dbd-a8b4-dfff51f1db95",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.820168Z",
"iopub.status.busy": "2024-09-11T02:41:59.819990Z",
"iopub.status.idle": "2024-09-11T02:42:00.309034Z",
"shell.execute_reply": "2024-09-11T02:42:00.308578Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(queries=['Harrison work place', 'Ankush work place'])"
"Search(queries=['Harrison work history', 'Ankush work history'])"
]
},
"execution_count": 5,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -226,9 +275,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "1e047d87",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:00.311131Z",
"iopub.status.busy": "2024-09-11T02:42:00.310972Z",
"iopub.status.idle": "2024-09-11T02:42:00.313365Z",
"shell.execute_reply": "2024-09-11T02:42:00.313025Z"
}
},
"outputs": [],
"source": [
"from langchain_core.runnables import chain"
@@ -236,9 +292,16 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 9,
"id": "8dac7866",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:00.315138Z",
"iopub.status.busy": "2024-09-11T02:42:00.315016Z",
"iopub.status.idle": "2024-09-11T02:42:00.317427Z",
"shell.execute_reply": "2024-09-11T02:42:00.317088Z"
}
},
"outputs": [],
"source": [
"@chain\n",
@@ -255,17 +318,25 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 10,
"id": "232ad8a7-7990-4066-9228-d35a555f7293",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:00.318951Z",
"iopub.status.busy": "2024-09-11T02:42:00.318829Z",
"iopub.status.idle": "2024-09-11T02:42:01.512855Z",
"shell.execute_reply": "2024-09-11T02:42:01.512321Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Harrison worked at Kensho')]"
"[Document(page_content='Harrison worked at Kensho'),\n",
" Document(page_content='Harrison worked at Kensho')]"
]
},
"execution_count": 33,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -276,9 +347,16 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 11,
"id": "28e14ba5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:01.515743Z",
"iopub.status.busy": "2024-09-11T02:42:01.515400Z",
"iopub.status.idle": "2024-09-11T02:42:02.349930Z",
"shell.execute_reply": "2024-09-11T02:42:02.349382Z"
}
},
"outputs": [
{
"data": {
@@ -287,7 +365,7 @@
" Document(page_content='Ankush worked at Facebook')]"
]
},
"execution_count": 34,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -321,7 +399,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -33,10 +33,25 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:13.105266Z",
"iopub.status.busy": "2024-09-11T02:42:13.104556Z",
"iopub.status.idle": "2024-09-11T02:42:17.936922Z",
"shell.execute_reply": "2024-09-11T02:42:17.936478Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai langchain-chroma"
]
},
{
@@ -51,15 +66,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:17.939072Z",
"iopub.status.busy": "2024-09-11T02:42:17.938929Z",
"iopub.status.idle": "2024-09-11T02:42:17.941266Z",
"shell.execute_reply": "2024-09-11T02:42:17.940968Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@@ -78,9 +101,16 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 3,
"id": "1f621694",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:17.942794Z",
"iopub.status.busy": "2024-09-11T02:42:17.942674Z",
"iopub.status.idle": "2024-09-11T02:42:19.939459Z",
"shell.execute_reply": "2024-09-11T02:42:19.938842Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@@ -110,14 +140,21 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 4,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:19.942780Z",
"iopub.status.busy": "2024-09-11T02:42:19.942567Z",
"iopub.status.idle": "2024-09-11T02:42:19.947709Z",
"shell.execute_reply": "2024-09-11T02:42:19.947252Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Search(BaseModel):\n",
@@ -135,9 +172,16 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 5,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:19.949936Z",
"iopub.status.busy": "2024-09-11T02:42:19.949778Z",
"iopub.status.idle": "2024-09-11T02:42:20.073883Z",
"shell.execute_reply": "2024-09-11T02:42:20.073556Z"
}
},
"outputs": [],
"source": [
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
@@ -154,7 +198,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@@ -169,17 +213,24 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 6,
"id": "bc1d3863",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:20.075511Z",
"iopub.status.busy": "2024-09-11T02:42:20.075428Z",
"iopub.status.idle": "2024-09-11T02:42:20.902011Z",
"shell.execute_reply": "2024-09-11T02:42:20.901558Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='workplace', person='HARRISON')"
"Search(query='work history', person='HARRISON')"
]
},
"execution_count": 19,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -190,17 +241,24 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 7,
"id": "af62af17-4f90-4dbd-a8b4-dfff51f1db95",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:20.904384Z",
"iopub.status.busy": "2024-09-11T02:42:20.904195Z",
"iopub.status.idle": "2024-09-11T02:42:21.468172Z",
"shell.execute_reply": "2024-09-11T02:42:21.467639Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='workplace', person='ANKUSH')"
"Search(query='work history', person='ANKUSH')"
]
},
"execution_count": 20,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -221,9 +279,16 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 8,
"id": "1e047d87",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.470953Z",
"iopub.status.busy": "2024-09-11T02:42:21.470736Z",
"iopub.status.idle": "2024-09-11T02:42:21.473544Z",
"shell.execute_reply": "2024-09-11T02:42:21.473064Z"
}
},
"outputs": [],
"source": [
"from langchain_core.runnables import chain"
@@ -231,9 +296,16 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 9,
"id": "4ec0c7fe",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.476024Z",
"iopub.status.busy": "2024-09-11T02:42:21.475835Z",
"iopub.status.idle": "2024-09-11T02:42:21.478359Z",
"shell.execute_reply": "2024-09-11T02:42:21.477932Z"
}
},
"outputs": [],
"source": [
"retrievers = {\n",
@@ -244,9 +316,16 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 10,
"id": "8dac7866",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.480247Z",
"iopub.status.busy": "2024-09-11T02:42:21.480084Z",
"iopub.status.idle": "2024-09-11T02:42:21.482732Z",
"shell.execute_reply": "2024-09-11T02:42:21.482382Z"
}
},
"outputs": [],
"source": [
"@chain\n",
@@ -258,9 +337,16 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 11,
"id": "232ad8a7-7990-4066-9228-d35a555f7293",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.484480Z",
"iopub.status.busy": "2024-09-11T02:42:21.484361Z",
"iopub.status.idle": "2024-09-11T02:42:22.136704Z",
"shell.execute_reply": "2024-09-11T02:42:22.136244Z"
}
},
"outputs": [
{
"data": {
@@ -268,7 +354,7 @@
"[Document(page_content='Harrison worked at Kensho')]"
]
},
"execution_count": 24,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -279,9 +365,16 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 12,
"id": "28e14ba5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:22.139305Z",
"iopub.status.busy": "2024-09-11T02:42:22.139106Z",
"iopub.status.idle": "2024-09-11T02:42:23.479739Z",
"shell.execute_reply": "2024-09-11T02:42:23.479170Z"
}
},
"outputs": [
{
"data": {
@@ -289,7 +382,7 @@
"[Document(page_content='Ankush worked at Facebook')]"
]
},
"execution_count": 25,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -323,7 +416,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -35,10 +35,25 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:33.121714Z",
"iopub.status.busy": "2024-09-11T02:42:33.121392Z",
"iopub.status.idle": "2024-09-11T02:42:36.998607Z",
"shell.execute_reply": "2024-09-11T02:42:36.998126Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai langchain-chroma"
]
},
{
@@ -53,15 +68,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:37.001017Z",
"iopub.status.busy": "2024-09-11T02:42:37.000859Z",
"iopub.status.idle": "2024-09-11T02:42:37.003704Z",
"shell.execute_reply": "2024-09-11T02:42:37.003335Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@@ -80,9 +103,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "1f621694",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:37.005644Z",
"iopub.status.busy": "2024-09-11T02:42:37.005493Z",
"iopub.status.idle": "2024-09-11T02:42:38.288481Z",
"shell.execute_reply": "2024-09-11T02:42:38.287904Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@@ -110,14 +140,21 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:38.291700Z",
"iopub.status.busy": "2024-09-11T02:42:38.291468Z",
"iopub.status.idle": "2024-09-11T02:42:38.295796Z",
"shell.execute_reply": "2024-09-11T02:42:38.295205Z"
}
},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Search(BaseModel):\n",
@@ -131,9 +168,16 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:38.297840Z",
"iopub.status.busy": "2024-09-11T02:42:38.297712Z",
"iopub.status.idle": "2024-09-11T02:42:38.420456Z",
"shell.execute_reply": "2024-09-11T02:42:38.420140Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
@@ -149,7 +193,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.bind_tools([Search])\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@@ -164,17 +208,24 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "bc1d3863",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:38.421934Z",
"iopub.status.busy": "2024-09-11T02:42:38.421831Z",
"iopub.status.idle": "2024-09-11T02:42:39.048915Z",
"shell.execute_reply": "2024-09-11T02:42:39.048519Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ZnoVX4j9Mn8wgChaORyd1cvq', 'function': {'arguments': '{\"query\":\"Harrison\"}', 'name': 'Search'}, 'type': 'function'}]})"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_korLZrh08PTRL94f4L7rFqdj', 'function': {'arguments': '{\"query\":\"Harrison\"}', 'name': 'Search'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 95, 'total_tokens': 109}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_483d39d857', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-ea94d376-37bf-4f80-abe6-e3b42b767ea0-0', tool_calls=[{'name': 'Search', 'args': {'query': 'Harrison'}, 'id': 'call_korLZrh08PTRL94f4L7rFqdj', 'type': 'tool_call'}], usage_metadata={'input_tokens': 95, 'output_tokens': 14, 'total_tokens': 109})"
]
},
"execution_count": 4,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -185,17 +236,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "af62af17-4f90-4dbd-a8b4-dfff51f1db95",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:39.050923Z",
"iopub.status.busy": "2024-09-11T02:42:39.050785Z",
"iopub.status.idle": "2024-09-11T02:42:40.090421Z",
"shell.execute_reply": "2024-09-11T02:42:40.089454Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Hello! How can I assist you today?')"
"AIMessage(content='Hello! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 93, 'total_tokens': 103}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_483d39d857', 'finish_reason': 'stop', 'logprobs': None}, id='run-ebdfc44a-455a-4ca6-be85-84559886b1e1-0', usage_metadata={'input_tokens': 93, 'output_tokens': 10, 'total_tokens': 103})"
]
},
"execution_count": 5,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -216,9 +274,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "1e047d87",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:40.093716Z",
"iopub.status.busy": "2024-09-11T02:42:40.093472Z",
"iopub.status.idle": "2024-09-11T02:42:40.097732Z",
"shell.execute_reply": "2024-09-11T02:42:40.097274Z"
}
},
"outputs": [],
"source": [
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
@@ -229,9 +294,16 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 9,
"id": "8dac7866",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:40.100028Z",
"iopub.status.busy": "2024-09-11T02:42:40.099882Z",
"iopub.status.idle": "2024-09-11T02:42:40.103105Z",
"shell.execute_reply": "2024-09-11T02:42:40.102734Z"
}
},
"outputs": [],
"source": [
"@chain\n",
@@ -248,9 +320,16 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 10,
"id": "232ad8a7-7990-4066-9228-d35a555f7293",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:40.105092Z",
"iopub.status.busy": "2024-09-11T02:42:40.104917Z",
"iopub.status.idle": "2024-09-11T02:42:41.341967Z",
"shell.execute_reply": "2024-09-11T02:42:41.341455Z"
}
},
"outputs": [
{
"name": "stderr",
@@ -265,7 +344,7 @@
"[Document(page_content='Harrison worked at Kensho')]"
]
},
"execution_count": 8,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -276,17 +355,24 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 11,
"id": "28e14ba5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:41.344639Z",
"iopub.status.busy": "2024-09-11T02:42:41.344411Z",
"iopub.status.idle": "2024-09-11T02:42:41.798332Z",
"shell.execute_reply": "2024-09-11T02:42:41.798054Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Hello! How can I assist you today?')"
"AIMessage(content='Hello! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 93, 'total_tokens': 103}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_483d39d857', 'finish_reason': 'stop', 'logprobs': None}, id='run-e87f058d-30c0-4075-8a89-a01b982d557e-0', usage_metadata={'input_tokens': 93, 'output_tokens': 10, 'total_tokens': 103})"
]
},
"execution_count": 9,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -320,7 +406,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -62,7 +62,8 @@
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
"\n",
"model = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)"
]

View File

@@ -44,7 +44,7 @@
" ],\n",
")\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", api_key=\"llm-api-key\")\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", api_key=\"llm-api-key\")\n",
"\n",
"chain = prompt | llm"
]

View File

@@ -136,7 +136,7 @@
"source": [
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Table(BaseModel):\n",

View File

@@ -101,7 +101,7 @@
"source": [
"from typing import Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Pydantic\n",
@@ -257,17 +257,17 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 19,
"id": "9194bcf2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Response(output=Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=8))"
"FinalResponse(final_output=Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=7))"
]
},
"execution_count": 4,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@@ -293,28 +293,28 @@
" response: str = Field(description=\"A conversational response to the user's query\")\n",
"\n",
"\n",
"class Response(BaseModel):\n",
" output: Union[Joke, ConversationalResponse]\n",
"class FinalResponse(BaseModel):\n",
" final_output: Union[Joke, ConversationalResponse]\n",
"\n",
"\n",
"structured_llm = llm.with_structured_output(Response)\n",
"structured_llm = llm.with_structured_output(FinalResponse)\n",
"\n",
"structured_llm.invoke(\"Tell me a joke about cats\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 20,
"id": "84d86132",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Response(output=ConversationalResponse(response=\"I'm just a digital assistant, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?\"))"
"FinalResponse(final_output=ConversationalResponse(response=\"I'm just a bunch of code, so I don't have feelings, but I'm here and ready to help you! How can I assist you today?\"))"
]
},
"execution_count": 5,
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@@ -667,7 +667,7 @@
"\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@@ -794,7 +794,7 @@
"\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@@ -915,9 +915,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-2",
"language": "python",
"name": "python3"
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {

View File

@@ -64,7 +64,14 @@
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:43:40.609832Z",
"iopub.status.busy": "2024-09-11T02:43:40.609565Z",
"iopub.status.idle": "2024-09-11T02:43:40.617860Z",
"shell.execute_reply": "2024-09-11T02:43:40.617391Z"
}
},
"outputs": [],
"source": [
"# The function name, type hints, and docstring are all part of the tool\n",
@@ -109,10 +116,17 @@
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:43:40.620257Z",
"iopub.status.busy": "2024-09-11T02:43:40.620084Z",
"iopub.status.idle": "2024-09-11T02:43:40.689214Z",
"shell.execute_reply": "2024-09-11T02:43:40.688938Z"
}
},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class add(BaseModel):\n",
@@ -144,7 +158,14 @@
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:43:40.690850Z",
"iopub.status.busy": "2024-09-11T02:43:40.690739Z",
"iopub.status.idle": "2024-09-11T02:43:40.693436Z",
"shell.execute_reply": "2024-09-11T02:43:40.693199Z"
}
},
"outputs": [],
"source": [
"from typing_extensions import Annotated, TypedDict\n",
@@ -201,7 +222,8 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
@@ -209,12 +231,19 @@
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:43:42.447839Z",
"iopub.status.busy": "2024-09-11T02:43:42.447760Z",
"iopub.status.idle": "2024-09-11T02:43:43.181171Z",
"shell.execute_reply": "2024-09-11T02:43:43.180680Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_BwYJ4UgU5pRVCBOUmiu7NhF9', 'function': {'arguments': '{\"a\":3,\"b\":12}', 'name': 'multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 80, 'total_tokens': 97}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_ba606877f9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7f05e19e-4561-40e2-a2d0-8f4e28e9a00f-0', tool_calls=[{'name': 'multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_BwYJ4UgU5pRVCBOUmiu7NhF9', 'type': 'tool_call'}], usage_metadata={'input_tokens': 80, 'output_tokens': 17, 'total_tokens': 97})"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_iXj4DiW1p7WLjTAQMRO0jxMs', 'function': {'arguments': '{\"a\":3,\"b\":12}', 'name': 'multiply'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 80, 'total_tokens': 97}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_483d39d857', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-0b620986-3f62-4df7-9ba3-4595089f9ad4-0', tool_calls=[{'name': 'multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_iXj4DiW1p7WLjTAQMRO0jxMs', 'type': 'tool_call'}], usage_metadata={'input_tokens': 80, 'output_tokens': 17, 'total_tokens': 97})"
]
},
"execution_count": 5,
@@ -259,18 +288,25 @@
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:43:43.184004Z",
"iopub.status.busy": "2024-09-11T02:43:43.183777Z",
"iopub.status.idle": "2024-09-11T02:43:43.743024Z",
"shell.execute_reply": "2024-09-11T02:43:43.742171Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'multiply',\n",
" 'args': {'a': 3, 'b': 12},\n",
" 'id': 'call_rcdMie7E89Xx06lEKKxJyB5N',\n",
" 'id': 'call_1fyhJAbJHuKQe6n0PacubGsL',\n",
" 'type': 'tool_call'},\n",
" {'name': 'add',\n",
" 'args': {'a': 11, 'b': 49},\n",
" 'id': 'call_nheGN8yfvSJsnIuGZaXihou3',\n",
" 'id': 'call_fc2jVkKzwuPWyU7kS9qn1hyG',\n",
" 'type': 'tool_call'}]"
]
},
@@ -306,7 +342,14 @@
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:43:43.746273Z",
"iopub.status.busy": "2024-09-11T02:43:43.746020Z",
"iopub.status.idle": "2024-09-11T02:43:44.586236Z",
"shell.execute_reply": "2024-09-11T02:43:44.585619Z"
}
},
"outputs": [
{
"data": {
@@ -321,7 +364,7 @@
],
"source": [
"from langchain_core.output_parsers import PydanticToolsParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class add(BaseModel):\n",

View File

@@ -57,9 +57,10 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{

View File

@@ -57,9 +57,10 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
@@ -77,7 +78,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
]
},
"metadata": {},
@@ -111,7 +112,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
]
},
"metadata": {},

View File

@@ -53,7 +53,8 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]

View File

@@ -55,13 +55,20 @@
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:51.802901Z",
"iopub.status.busy": "2024-09-11T02:52:51.802682Z",
"iopub.status.idle": "2024-09-11T02:52:52.398167Z",
"shell.execute_reply": "2024-09-11T02:52:52.397911Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"# %pip install -qU langchain langchain_openai\n",
"%pip install -qU langchain langchain_openai\n",
"\n",
"import os\n",
"from getpass import getpass\n",
@@ -71,7 +78,7 @@
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
@@ -86,7 +93,14 @@
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:52.399922Z",
"iopub.status.busy": "2024-09-11T02:52:52.399796Z",
"iopub.status.idle": "2024-09-11T02:52:52.406349Z",
"shell.execute_reply": "2024-09-11T02:52:52.406077Z"
}
},
"outputs": [],
"source": [
"from typing import List\n",
@@ -141,22 +155,29 @@
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:52.407763Z",
"iopub.status.busy": "2024-09-11T02:52:52.407691Z",
"iopub.status.idle": "2024-09-11T02:52:52.411761Z",
"shell.execute_reply": "2024-09-11T02:52:52.411512Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_petsSchema',\n",
" 'description': 'Add the list of favorite pets.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
"{'description': 'Add the list of favorite pets.',\n",
" 'properties': {'pets': {'description': 'List of favorite pets to set.',\n",
" 'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'},\n",
" 'user_id': {'description': \"User's ID.\",\n",
" 'title': 'User Id',\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
" 'required': ['pets', 'user_id'],\n",
" 'title': 'update_favorite_petsSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 3,
@@ -178,19 +199,26 @@
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:52.427826Z",
"iopub.status.busy": "2024-09-11T02:52:52.427691Z",
"iopub.status.idle": "2024-09-11T02:52:52.431791Z",
"shell.execute_reply": "2024-09-11T02:52:52.431574Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Add the list of favorite pets.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
"{'description': 'Add the list of favorite pets.',\n",
" 'properties': {'pets': {'description': 'List of favorite pets to set.',\n",
" 'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'}},\n",
" 'required': ['pets'],\n",
" 'title': 'update_favorite_pets',\n",
" 'type': 'object'}"
]
},
"execution_count": 4,
@@ -212,7 +240,14 @@
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:52.433096Z",
"iopub.status.busy": "2024-09-11T02:52:52.433014Z",
"iopub.status.idle": "2024-09-11T02:52:52.437499Z",
"shell.execute_reply": "2024-09-11T02:52:52.437239Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -240,14 +275,21 @@
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:52.439148Z",
"iopub.status.busy": "2024-09-11T02:52:52.438742Z",
"iopub.status.idle": "2024-09-11T02:52:53.394524Z",
"shell.execute_reply": "2024-09-11T02:52:53.394005Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'update_favorite_pets',\n",
" 'args': {'pets': ['cats', 'parrots']},\n",
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
" 'id': 'call_pZ6XVREGh1L0BBSsiGIf1xVm',\n",
" 'type': 'tool_call'}]"
]
},
@@ -284,14 +326,21 @@
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:53.397134Z",
"iopub.status.busy": "2024-09-11T02:52:53.396972Z",
"iopub.status.idle": "2024-09-11T02:52:53.403332Z",
"shell.execute_reply": "2024-09-11T02:52:53.402787Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'update_favorite_pets',\n",
" 'args': {'pets': ['cats', 'parrots'], 'user_id': '123'},\n",
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
" 'id': 'call_pZ6XVREGh1L0BBSsiGIf1xVm',\n",
" 'type': 'tool_call'}]"
]
},
@@ -329,12 +378,19 @@
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:53.405183Z",
"iopub.status.busy": "2024-09-11T02:52:53.405048Z",
"iopub.status.idle": "2024-09-11T02:52:54.248576Z",
"shell.execute_reply": "2024-09-11T02:52:54.248107Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[ToolMessage(content='null', name='update_favorite_pets', tool_call_id='call_HUyF6AihqANzEYxQnTUKxkXj')]"
"[ToolMessage(content='null', name='update_favorite_pets', tool_call_id='call_oYCD0THSedHTbwNAY3NW6uUj')]"
]
},
"execution_count": 8,
@@ -365,7 +421,14 @@
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:54.251169Z",
"iopub.status.busy": "2024-09-11T02:52:54.250948Z",
"iopub.status.idle": "2024-09-11T02:52:54.254279Z",
"shell.execute_reply": "2024-09-11T02:52:54.253889Z"
}
},
"outputs": [
{
"data": {
@@ -394,22 +457,29 @@
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:54.256425Z",
"iopub.status.busy": "2024-09-11T02:52:54.256279Z",
"iopub.status.idle": "2024-09-11T02:52:54.262533Z",
"shell.execute_reply": "2024-09-11T02:52:54.262228Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'UpdateFavoritePetsSchema',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
"{'description': 'Update list of favorite pets',\n",
" 'properties': {'pets': {'description': 'List of favorite pets to set.',\n",
" 'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'},\n",
" 'user_id': {'description': \"User's ID.\",\n",
" 'title': 'User Id',\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
" 'required': ['pets', 'user_id'],\n",
" 'title': 'UpdateFavoritePetsSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 10,
@@ -418,8 +488,8 @@
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.tools import BaseTool\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class UpdateFavoritePetsSchema(BaseModel):\n",
@@ -440,19 +510,26 @@
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:54.264192Z",
"iopub.status.busy": "2024-09-11T02:52:54.264074Z",
"iopub.status.idle": "2024-09-11T02:52:54.267400Z",
"shell.execute_reply": "2024-09-11T02:52:54.267113Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
"{'description': 'Update list of favorite pets',\n",
" 'properties': {'pets': {'description': 'List of favorite pets to set.',\n",
" 'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'}},\n",
" 'required': ['pets'],\n",
" 'title': 'update_favorite_pets',\n",
" 'type': 'object'}"
]
},
"execution_count": 11,
@@ -466,26 +543,33 @@
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"execution_count": 12,
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:54.269027Z",
"iopub.status.busy": "2024-09-11T02:52:54.268905Z",
"iopub.status.idle": "2024-09-11T02:52:54.276123Z",
"shell.execute_reply": "2024-09-11T02:52:54.275876Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'UpdateFavoritePetsSchema',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
"{'description': 'Update list of favorite pets',\n",
" 'properties': {'pets': {'description': 'List of favorite pets to set.',\n",
" 'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'},\n",
" 'user_id': {'description': \"User's ID.\",\n",
" 'title': 'User Id',\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
" 'required': ['pets', 'user_id'],\n",
" 'title': 'UpdateFavoritePetsSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 22,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -508,23 +592,30 @@
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"execution_count": 13,
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:54.277497Z",
"iopub.status.busy": "2024-09-11T02:52:54.277400Z",
"iopub.status.idle": "2024-09-11T02:52:54.280323Z",
"shell.execute_reply": "2024-09-11T02:52:54.280072Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
"{'description': 'Update list of favorite pets',\n",
" 'properties': {'pets': {'description': 'List of favorite pets to set.',\n",
" 'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'}},\n",
" 'required': ['pets'],\n",
" 'title': 'update_favorite_pets',\n",
" 'type': 'object'}"
]
},
"execution_count": 23,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -535,23 +626,30 @@
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"execution_count": 14,
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:54.281741Z",
"iopub.status.busy": "2024-09-11T02:52:54.281642Z",
"iopub.status.idle": "2024-09-11T02:52:54.288857Z",
"shell.execute_reply": "2024-09-11T02:52:54.288632Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_petsSchema',\n",
" 'description': 'Use the tool.\\n\\nAdd run_manager: Optional[CallbackManagerForToolRun] = None\\nto child implementations to enable tracing.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
"{'description': 'Use the tool.\\n\\nAdd run_manager: Optional[CallbackManagerForToolRun] = None\\nto child implementations to enable tracing.',\n",
" 'properties': {'pets': {'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'},\n",
" 'user_id': {'title': 'User Id', 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
" 'required': ['pets', 'user_id'],\n",
" 'title': 'update_favorite_petsSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 24,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -570,22 +668,29 @@
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"execution_count": 15,
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:52:54.290237Z",
"iopub.status.busy": "2024-09-11T02:52:54.290145Z",
"iopub.status.idle": "2024-09-11T02:52:54.294273Z",
"shell.execute_reply": "2024-09-11T02:52:54.294053Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
"{'description': 'Update list of favorite pets',\n",
" 'properties': {'pets': {'items': {'type': 'string'},\n",
" 'title': 'Pets',\n",
" 'type': 'array'}},\n",
" 'required': ['pets'],\n",
" 'title': 'update_favorite_pets',\n",
" 'type': 'object'}"
]
},
"execution_count": 26,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@@ -597,7 +702,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -611,7 +716,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -58,9 +58,10 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"llm_with_tools = llm.bind_tools(tools)"
]
},

View File

@@ -46,19 +46,33 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "84f70856-b865-4658-9930-7577fb4712ce",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:01:11.847104Z",
"iopub.status.busy": "2024-09-11T03:01:11.846727Z",
"iopub.status.idle": "2024-09-11T03:01:13.200038Z",
"shell.execute_reply": "2024-09-11T03:01:13.199355Z"
}
},
"outputs": [],
"source": [
"!pip install -qU wikipedia"
"!pip install -qU langchain-community wikipedia"
]
},
{
"cell_type": "code",
"execution_count": 51,
"execution_count": 2,
"id": "b4eaed85-c5a6-4ba9-b401-40258b0131c2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:01:13.203356Z",
"iopub.status.busy": "2024-09-11T03:01:13.202996Z",
"iopub.status.idle": "2024-09-11T03:01:14.740686Z",
"shell.execute_reply": "2024-09-11T03:01:14.739748Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -89,18 +103,25 @@
},
{
"cell_type": "code",
"execution_count": 55,
"execution_count": 3,
"id": "7f094f01-2e98-4947-acc4-0846963a96e0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:01:14.745018Z",
"iopub.status.busy": "2024-09-11T03:01:14.744347Z",
"iopub.status.idle": "2024-09-11T03:01:14.752527Z",
"shell.execute_reply": "2024-09-11T03:01:14.752112Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Name: wiki-tool\n",
"Description: look up things in wikipedia\n",
"args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}\n",
"returns directly?: True\n"
"Name: wikipedia\n",
"Description: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.\n",
"args schema: {'query': {'description': 'query to look up on wikipedia', 'title': 'Query', 'type': 'string'}}\n",
"returns directly?: False\n"
]
}
],
@@ -124,9 +145,16 @@
},
{
"cell_type": "code",
"execution_count": 56,
"execution_count": 4,
"id": "1365784c-e666-41c8-a1bb-e50f822b5936",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:01:14.755274Z",
"iopub.status.busy": "2024-09-11T03:01:14.755068Z",
"iopub.status.idle": "2024-09-11T03:01:15.375704Z",
"shell.execute_reply": "2024-09-11T03:01:15.374841Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -140,7 +168,7 @@
"source": [
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class WikiInputs(BaseModel):\n",
@@ -164,9 +192,16 @@
},
{
"cell_type": "code",
"execution_count": 57,
"execution_count": 5,
"id": "6e8850d6-6840-443e-a2be-adf64b30975c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:01:15.378598Z",
"iopub.status.busy": "2024-09-11T03:01:15.378414Z",
"iopub.status.idle": "2024-09-11T03:01:15.382248Z",
"shell.execute_reply": "2024-09-11T03:01:15.381801Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -174,7 +209,7 @@
"text": [
"Name: wiki-tool\n",
"Description: look up things in wikipedia\n",
"args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}\n",
"args schema: {'query': {'description': 'query to look up in Wikipedia, should be 3 or less words', 'title': 'Query', 'type': 'string'}}\n",
"returns directly?: True\n"
]
}
@@ -212,9 +247,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-311",
"language": "python",
"name": "python3"
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {

View File

@@ -166,7 +166,7 @@
"\n",
"from langchain_openai.chat_models import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{

View File

@@ -53,7 +53,14 @@
"cell_type": "code",
"execution_count": 2,
"id": "08785b6d-722d-4620-b6ec-36deb3842c69",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:25.005243Z",
"iopub.status.busy": "2024-09-11T03:10:25.005074Z",
"iopub.status.idle": "2024-09-11T03:10:25.007679Z",
"shell.execute_reply": "2024-09-11T03:10:25.007361Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
@@ -81,9 +88,16 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "86258950-5e61-4340-81b9-84a5d26e8773",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:25.009496Z",
"iopub.status.busy": "2024-09-11T03:10:25.009371Z",
"iopub.status.idle": "2024-09-11T03:10:25.552917Z",
"shell.execute_reply": "2024-09-11T03:10:25.552592Z"
}
},
"outputs": [],
"source": [
"# | echo: false\n",
@@ -91,16 +105,24 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "1d20604e-c4d1-4d21-841b-23e4f61aec36",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:25.554543Z",
"iopub.status.busy": "2024-09-11T03:10:25.554439Z",
"iopub.status.idle": "2024-09-11T03:10:25.631610Z",
"shell.execute_reply": "2024-09-11T03:10:25.631346Z"
}
},
"outputs": [],
"source": [
"# Define tool\n",
@@ -131,26 +153,33 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "d354664c-ac44-4967-a35f-8912b3ad9477",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:25.633050Z",
"iopub.status.busy": "2024-09-11T03:10:25.632978Z",
"iopub.status.idle": "2024-09-11T03:10:26.556508Z",
"shell.execute_reply": "2024-09-11T03:10:26.556233Z"
}
},
"outputs": [
{
"ename": "ValidationError",
"evalue": "1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)",
"evalue": "1 validation error for complex_toolSchema\ndict_arg\n Field required [type=missing, input_value={'int_arg': 5, 'float_arg': 2.1}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.8/v/missing",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/runnables/base.py:2572\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 2570\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m step\u001b[38;5;241m.\u001b[39minvoke(\u001b[38;5;28minput\u001b[39m, config, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 2571\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2572\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2573\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2574\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:380\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 374\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 376\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 377\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 378\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 379\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 380\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 381\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 382\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 383\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 384\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 385\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 386\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 387\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 388\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 389\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:537\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 535\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 536\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 537\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 538\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 539\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:526\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 524\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 525\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(_set_config_context, child_config)\n\u001b[0;32m--> 526\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 527\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 528\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 529\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(\n\u001b[1;32m 530\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m context\u001b[38;5;241m.\u001b[39mrun(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 534\u001b[0m )\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:424\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 422\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 423\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 424\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 425\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 426\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 427\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 428\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 429\u001b[0m }\n\u001b[1;32m 430\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:526\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.parse_obj\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)"
"Cell \u001b[0;32mIn[5], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.11/site-packages/langchain_core/runnables/base.py:2998\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 2996\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m context\u001b[38;5;241m.\u001b[39mrun(step\u001b[38;5;241m.\u001b[39minvoke, \u001b[38;5;28minput\u001b[39m, config, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 2997\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2998\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mcontext\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2999\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 3000\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.11/site-packages/langchain_core/tools/base.py:456\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 449\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 451\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict, ToolCall],\n\u001b[1;32m 452\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 453\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 454\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 455\u001b[0m tool_input, kwargs \u001b[38;5;241m=\u001b[39m _prep_run_args(\u001b[38;5;28minput\u001b[39m, config, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 456\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.11/site-packages/langchain_core/tools/base.py:659\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, tool_call_id, **kwargs)\u001b[0m\n\u001b[1;32m 657\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m error_to_raise:\n\u001b[1;32m 658\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_tool_error(error_to_raise)\n\u001b[0;32m--> 659\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m error_to_raise\n\u001b[1;32m 660\u001b[0m output \u001b[38;5;241m=\u001b[39m _format_output(content, artifact, tool_call_id, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, status)\n\u001b[1;32m 661\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_tool_end(output, color\u001b[38;5;241m=\u001b[39mcolor, name\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.11/site-packages/langchain_core/tools/base.py:622\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, tool_call_id, **kwargs)\u001b[0m\n\u001b[1;32m 620\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 621\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(_set_config_context, child_config)\n\u001b[0;32m--> 622\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_to_args_and_kwargs\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 623\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m signature(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m 624\u001b[0m tool_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.11/site-packages/langchain_core/tools/base.py:545\u001b[0m, in \u001b[0;36mBaseTool._to_args_and_kwargs\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 544\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_to_args_and_kwargs\u001b[39m(\u001b[38;5;28mself\u001b[39m, tool_input: Union[\u001b[38;5;28mstr\u001b[39m, Dict]) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tuple[Tuple, Dict]:\n\u001b[0;32m--> 545\u001b[0m tool_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 546\u001b[0m \u001b[38;5;66;03m# For backwards compatibility, if run_input is a string,\u001b[39;00m\n\u001b[1;32m 547\u001b[0m \u001b[38;5;66;03m# pass as a positional argument.\u001b[39;00m\n\u001b[1;32m 548\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tool_input, \u001b[38;5;28mstr\u001b[39m):\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.11/site-packages/langchain_core/tools/base.py:487\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 485\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 486\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28missubclass\u001b[39m(input_args, BaseModel):\n\u001b[0;32m--> 487\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel_validate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 488\u001b[0m result_dict \u001b[38;5;241m=\u001b[39m result\u001b[38;5;241m.\u001b[39mmodel_dump()\n\u001b[1;32m 489\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28missubclass\u001b[39m(input_args, BaseModelV1):\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.11/site-packages/pydantic/main.py:568\u001b[0m, in \u001b[0;36mBaseModel.model_validate\u001b[0;34m(cls, obj, strict, from_attributes, context)\u001b[0m\n\u001b[1;32m 566\u001b[0m \u001b[38;5;66;03m# `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks\u001b[39;00m\n\u001b[1;32m 567\u001b[0m __tracebackhide__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[0;32m--> 568\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__pydantic_validator__\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvalidate_python\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 569\u001b[0m \u001b[43m \u001b[49m\u001b[43mobj\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstrict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstrict\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrom_attributes\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfrom_attributes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcontext\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcontext\u001b[49m\n\u001b[1;32m 570\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchema\ndict_arg\n Field required [type=missing, input_value={'int_arg': 5, 'float_arg': 2.1}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.8/v/missing"
]
}
],
@@ -172,9 +201,16 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 6,
"id": "8fedb550-683d-45ae-8876-ae7acb332019",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:26.558131Z",
"iopub.status.busy": "2024-09-11T03:10:26.558031Z",
"iopub.status.idle": "2024-09-11T03:10:27.399844Z",
"shell.execute_reply": "2024-09-11T03:10:27.399201Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -186,9 +222,10 @@
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"<class 'pydantic_core._pydantic_core.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
" field required (type=value_error.missing)\n"
" Field required [type=missing, input_value={'int_arg': 5, 'float_arg': 2.1}, input_type=dict]\n",
" For further information visit https://errors.pydantic.dev/2.8/v/missing\n"
]
}
],
@@ -226,9 +263,16 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 7,
"id": "02cc4223-35fa-4240-976a-012299ca703c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:27.404122Z",
"iopub.status.busy": "2024-09-11T03:10:27.403539Z",
"iopub.status.idle": "2024-09-11T03:10:38.080547Z",
"shell.execute_reply": "2024-09-11T03:10:38.079955Z"
}
},
"outputs": [
{
"data": {
@@ -236,7 +280,7 @@
"10.5"
]
},
"execution_count": 10,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -277,9 +321,16 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 8,
"id": "b5659956-9454-468a-9753-a3ff9052b8f5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:38.083810Z",
"iopub.status.busy": "2024-09-11T03:10:38.083623Z",
"iopub.status.idle": "2024-09-11T03:10:38.090089Z",
"shell.execute_reply": "2024-09-11T03:10:38.089682Z"
}
},
"outputs": [],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage\n",
@@ -335,9 +386,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 9,
"id": "4c45f5bd-cbb4-47d5-b4b6-aec50673c750",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T03:10:38.092152Z",
"iopub.status.busy": "2024-09-11T03:10:38.092021Z",
"iopub.status.idle": "2024-09-11T03:10:39.592443Z",
"shell.execute_reply": "2024-09-11T03:10:39.591990Z"
}
},
"outputs": [
{
"data": {
@@ -345,7 +403,7 @@
"10.5"
]
},
"execution_count": 12,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -401,7 +459,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@@ -46,9 +46,10 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"llm_with_tools = llm.bind_tools(tools)"
]
},

View File

@@ -50,18 +50,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"AI21_API_KEY\"] = getpass()"
],
"outputs": [],
"execution_count": null
"if \"AI21_API_KEY\" not in os.environ:\n",
" os.environ[\"AI21_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
@@ -73,14 +74,14 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "7c2e19d3-7c58-4470-9e1a-718b27a32056",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
],
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",
@@ -115,15 +116,15 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "c40756fb-cbf8-4d44-a293-3989d707237e",
"metadata": {},
"outputs": [],
"source": [
"from langchain_ai21 import ChatAI21\n",
"\n",
"llm = ChatAI21(model=\"jamba-instruct\", temperature=0)"
],
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",
@@ -135,8 +136,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "46b982dc-5d8a-46da-a711-81c03ccd6adc",
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" (\n",
@@ -147,9 +150,7 @@
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
],
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",
@@ -163,6 +164,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "39353473fce5dd2e",
"metadata": {
"collapsed": false,
@@ -170,6 +172,7 @@
"outputs_hidden": false
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
@@ -191,25 +194,26 @@
" \"input\": \"I love programming.\",\n",
" }\n",
")"
],
"outputs": [],
"execution_count": null
]
},
{
"metadata": {},
"cell_type": "markdown",
"source": "# Tool Calls / Function Calling",
"id": "39c0ccd229927eab"
"id": "39c0ccd229927eab",
"metadata": {},
"source": "# Tool Calls / Function Calling"
},
{
"metadata": {},
"cell_type": "markdown",
"source": "This example shows how to use tool calling with AI21 models:",
"id": "2bf6b40be07fe2d4"
"id": "2bf6b40be07fe2d4",
"metadata": {},
"source": "This example shows how to use tool calling with AI21 models:"
},
{
"metadata": {},
"cell_type": "code",
"execution_count": null,
"id": "a181a28df77120fb",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
@@ -219,7 +223,8 @@
"from langchain_core.tools import tool\n",
"from langchain_core.utils.function_calling import convert_to_openai_tool\n",
"\n",
"os.environ[\"AI21_API_KEY\"] = getpass()\n",
"if \"AI21_API_KEY\" not in os.environ:\n",
" os.environ[\"AI21_API_KEY\"] = getpass()\n",
"\n",
"\n",
"@tool\n",
@@ -276,10 +281,7 @@
" print(f\"Assistant: {llm_answer.content}\")\n",
" else:\n",
" print(f\"Assistant: {response.content}\")"
],
"id": "a181a28df77120fb",
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",

View File

@@ -59,7 +59,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass(\"Enter your Anthropic API key: \")"
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass(\"Enter your Anthropic API key: \")"
]
},
{
@@ -274,7 +275,7 @@
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",

View File

@@ -69,7 +69,7 @@
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel\n",
"from pydantic import BaseModel\n",
"\n",
"\n",
"class Person(BaseModel):\n",

View File

@@ -54,7 +54,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"ANYSCALE_API_KEY\"] = getpass()"
"if \"ANYSCALE_API_KEY\" not in os.environ:\n",
" os.environ[\"ANYSCALE_API_KEY\"] = getpass()"
]
},
{

View File

@@ -58,7 +58,10 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\"Enter your AzureOpenAI API key: \")\n",
"if \"AZURE_OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\n",
" \"Enter your AzureOpenAI API key: \"\n",
" )\n",
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://YOUR-ENDPOINT.openai.azure.com/\""
]
},

View File

@@ -76,7 +76,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"CEREBRAS_API_KEY\"] = getpass.getpass(\"Enter your Cerebras API key: \")"
"if \"CEREBRAS_API_KEY\" not in os.environ:\n",
" os.environ[\"CEREBRAS_API_KEY\"] = getpass.getpass(\"Enter your Cerebras API key: \")"
]
},
{

View File

@@ -90,7 +90,10 @@
"import os\n",
"\n",
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")"
"if \"DATABRICKS_TOKEN\" not in os.environ:\n",
" os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\n",
" \"Enter your Databricks access token: \"\n",
" )"
]
},
{

View File

@@ -123,8 +123,8 @@
"from dotenv import find_dotenv, load_dotenv\n",
"from langchain_community.chat_models import ChatDeepInfra\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_core.tools import tool\n",
"from pydantic import BaseModel\n",
"\n",
"model_name = \"meta-llama/Meta-Llama-3-70B-Instruct\"\n",
"\n",

View File

@@ -264,7 +264,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"llm = ChatEdenAI(provider=\"openai\", temperature=0.2, max_tokens=500)\n",
"\n",

View File

@@ -47,7 +47,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"EVERLYAI_API_KEY\"] = getpass()"
"if \"EVERLYAI_API_KEY\" not in os.environ:\n",
" os.environ[\"EVERLYAI_API_KEY\"] = getpass()"
]
},
{

View File

@@ -52,7 +52,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Enter your Fireworks API key: \")"
"if \"FIREWORKS_API_KEY\" not in os.environ:\n",
" os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Enter your Fireworks API key: \")"
]
},
{

View File

@@ -44,7 +44,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")"
"if \"FRIENDLI_TOKEN\" not in os.environ:\n",
" os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")"
]
},
{

View File

@@ -47,7 +47,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"GIGACHAT_CREDENTIALS\"] = getpass()"
"if \"GIGACHAT_CREDENTIALS\" not in os.environ:\n",
" os.environ[\"GIGACHAT_CREDENTIALS\"] = getpass()"
]
},
{

View File

@@ -60,7 +60,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"GOOGLE_API_KEY\"] = getpass.getpass(\"Enter your Google AI API key: \")"
"if \"GOOGLE_API_KEY\" not in os.environ:\n",
" os.environ[\"GOOGLE_API_KEY\"] = getpass.getpass(\"Enter your Google AI API key: \")"
]
},
{

View File

@@ -50,7 +50,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"GROQ_API_KEY\"] = getpass.getpass(\"Enter your Groq API key: \")"
"if \"GROQ_API_KEY\" not in os.environ:\n",
" os.environ[\"GROQ_API_KEY\"] = getpass.getpass(\"Enter your Groq API key: \")"
]
},
{

View File

@@ -483,7 +483,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",

View File

@@ -226,8 +226,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.tools import tool\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class WeatherInput(BaseModel):\n",
@@ -343,8 +343,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_core.utils.function_calling import convert_to_openai_tool\n",
"from pydantic import BaseModel\n",
"\n",
"\n",
"class Joke(BaseModel):\n",

View File

@@ -52,7 +52,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"MISTRAL_API_KEY\"] = getpass.getpass(\"Enter your Mistral API key: \")"
"if \"MISTRAL_API_KEY\" not in os.environ:\n",
" os.environ[\"MISTRAL_API_KEY\"] = getpass.getpass(\"Enter your Mistral API key: \")"
]
},
{

View File

@@ -658,8 +658,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import Field\n",
"from langchain_core.tools import tool\n",
"from pydantic import Field\n",
"\n",
"\n",
"@tool\n",

View File

@@ -426,8 +426,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.tools import tool\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Define the schema for function arguments\n",

View File

@@ -41,7 +41,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"YI_API_KEY\"] = getpass.getpass(\"Enter your Yi API key: \")"
"if \"YI_API_KEY\" not in os.environ:\n",
" os.environ[\"YI_API_KEY\"] = getpass.getpass(\"Enter your Yi API key: \")"
]
},
{

View File

@@ -72,7 +72,7 @@
"source": [
"from enum import Enum\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Operation(Enum):\n",
@@ -135,8 +135,8 @@
"source": [
"from pprint import pprint\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_core.utils.function_calling import convert_pydantic_to_openai_function\n",
"from pydantic import BaseModel\n",
"\n",
"openai_function_def = convert_pydantic_to_openai_function(Calculator)\n",
"pprint(openai_function_def)"

View File

@@ -39,9 +39,10 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"UNSTRUCTURED_API_KEY\"] = getpass.getpass(\n",
" \"Enter your Unstructured API key: \"\n",
")"
"if \"UNSTRUCTURED_API_KEY\" not in os.environ:\n",
" os.environ[\"UNSTRUCTURED_API_KEY\"] = getpass.getpass(\n",
" \"Enter your Unstructured API key: \"\n",
" )"
]
},
{

View File

@@ -58,7 +58,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"AI21_API_KEY\"] = getpass()"
"if \"AI21_API_KEY\" not in os.environ:\n",
" os.environ[\"AI21_API_KEY\"] = getpass()"
]
},
{

View File

@@ -44,7 +44,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"DASHSCOPE_API_KEY\"] = getpass.getpass(\"DashScope API Key:\")"
"if \"DASHSCOPE_API_KEY\" not in os.environ:\n",
" os.environ[\"DASHSCOPE_API_KEY\"] = getpass.getpass(\"DashScope API Key:\")"
]
},
{

View File

@@ -214,7 +214,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"combine_docs_chain = create_stuff_documents_chain(llm, retrieval_qa_chat_prompt)\n",
"chain = create_retrieval_chain(compression_retriever, combine_docs_chain)"
]

View File

@@ -50,7 +50,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
]
},
{

View File

@@ -44,8 +44,10 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"VOLC_API_AK\"] = getpass.getpass(\"Volcengine API AK:\")\n",
"os.environ[\"VOLC_API_SK\"] = getpass.getpass(\"Volcengine API SK:\")"
"if \"VOLC_API_AK\" not in os.environ:\n",
" os.environ[\"VOLC_API_AK\"] = getpass.getpass(\"Volcengine API AK:\")\n",
"if \"VOLC_API_SK\" not in os.environ:\n",
" os.environ[\"VOLC_API_SK\"] = getpass.getpass(\"Volcengine API SK:\")"
]
},
{

Some files were not shown because too many files have changed in this diff Show More