Skip to content

Commit 920d504

Browse files
authored
fireworks[patch]: update model in LLM integration tests (#30951)
`mixtral-8x7b-instruct` has been retired.
1 parent 1f30545 commit 920d504

File tree

1 file changed

+11
-9
lines changed

1 file changed

+11
-9
lines changed

libs/partners/fireworks/tests/integration_tests/test_llms.py

+11-9
Original file line number | Diff line number | Diff line change
@@ -11,11 +11,13 @@
1111

1212
from langchain_fireworks import Fireworks
1313

14+
_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
15+
1416

1517
def test_fireworks_call() -> None:
1618
"""Test simple call to fireworks."""
1719
llm = Fireworks(
18-
model="accounts/fireworks/models/mixtral-8x7b-instruct",
20+
model=_MODEL,
1921
temperature=0.2,
2022
max_tokens=250,
2123
)
@@ -29,7 +31,7 @@ def test_fireworks_call() -> None:
2931
async def test_fireworks_acall() -> None:
3032
"""Test simple call to fireworks."""
3133
llm = Fireworks(
32-
model="accounts/fireworks/models/mixtral-8x7b-instruct",
34+
model=_MODEL,
3335
temperature=0.2,
3436
max_tokens=250,
3537
)
@@ -43,23 +45,23 @@ async def test_fireworks_acall() -> None:
4345

4446
def test_stream() -> None:
4547
"""Test streaming tokens from OpenAI."""
46-
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
48+
llm = Fireworks(model=_MODEL)
4749

4850
for token in llm.stream("I'm Pickle Rick"):
4951
assert isinstance(token, str)
5052

5153

5254
async def test_astream() -> None:
5355
"""Test streaming tokens from OpenAI."""
54-
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
56+
llm = Fireworks(model=_MODEL)
5557

5658
async for token in llm.astream("I'm Pickle Rick"):
5759
assert isinstance(token, str)
5860

5961

6062
async def test_abatch() -> None:
6163
"""Test streaming tokens from Fireworks."""
62-
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
64+
llm = Fireworks(model=_MODEL)
6365

6466
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
6567
for token in result:
@@ -68,7 +70,7 @@ async def test_abatch() -> None:
6870

6971
async def test_abatch_tags() -> None:
7072
"""Test batch tokens from Fireworks."""
71-
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
73+
llm = Fireworks(model=_MODEL)
7274

7375
result = await llm.abatch(
7476
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
@@ -79,7 +81,7 @@ async def test_abatch_tags() -> None:
7981

8082
def test_batch() -> None:
8183
"""Test batch tokens from Fireworks."""
82-
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
84+
llm = Fireworks(model=_MODEL)
8385

8486
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
8587
for token in result:
@@ -88,15 +90,15 @@ def test_batch() -> None:
8890

8991
async def test_ainvoke() -> None:
9092
"""Test invoke tokens from Fireworks."""
91-
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
93+
llm = Fireworks(model=_MODEL)
9294

9395
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
9496
assert isinstance(result, str)
9597

9698

9799
def test_invoke() -> None:
98100
"""Test invoke tokens from Fireworks."""
99-
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
101+
llm = Fireworks(model=_MODEL)
100102

101103
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
102104
assert isinstance(result, str)

0 commit comments

Comments (0)