@@ -11,11 +11,13 @@
 
 from langchain_fireworks import Fireworks
 
+_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
+
 
 def test_fireworks_call() -> None:
     """Test simple call to fireworks."""
     llm = Fireworks(
-        model="accounts/fireworks/models/mixtral-8x7b-instruct",
+        model=_MODEL,
         temperature=0.2,
         max_tokens=250,
     )
@@ -29,7 +31,7 @@ def test_fireworks_call() -> None:
 async def test_fireworks_acall() -> None:
     """Test simple call to fireworks."""
     llm = Fireworks(
-        model="accounts/fireworks/models/mixtral-8x7b-instruct",
+        model=_MODEL,
         temperature=0.2,
         max_tokens=250,
     )
@@ -43,23 +45,23 @@ async def test_fireworks_acall() -> None:
 
 def test_stream() -> None:
     """Test streaming tokens from Fireworks."""
-    llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
+    llm = Fireworks(model=_MODEL)
 
     for token in llm.stream("I'm Pickle Rick"):
         assert isinstance(token, str)
 
 
 async def test_astream() -> None:
     """Test streaming tokens from Fireworks."""
-    llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
+    llm = Fireworks(model=_MODEL)
 
     async for token in llm.astream("I'm Pickle Rick"):
         assert isinstance(token, str)
 
 
 async def test_abatch() -> None:
     """Test batch tokens from Fireworks."""
-    llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
+    llm = Fireworks(model=_MODEL)
 
     result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
     for token in result:
@@ -68,7 +70,7 @@ async def test_abatch() -> None:
 
 async def test_abatch_tags() -> None:
     """Test batch tokens from Fireworks."""
-    llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
+    llm = Fireworks(model=_MODEL)
 
     result = await llm.abatch(
         ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
@@ -79,7 +81,7 @@ async def test_abatch_tags() -> None:
 
 def test_batch() -> None:
     """Test batch tokens from Fireworks."""
-    llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
+    llm = Fireworks(model=_MODEL)
 
     result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
     for token in result:
@@ -88,15 +90,15 @@ def test_batch() -> None:
 
 async def test_ainvoke() -> None:
     """Test invoke tokens from Fireworks."""
-    llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
+    llm = Fireworks(model=_MODEL)
 
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
     assert isinstance(result, str)
 
 
 def test_invoke() -> None:
     """Test invoke tokens from Fireworks."""
-    llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
+    llm = Fireworks(model=_MODEL)
 
     result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
     assert isinstance(result, str)
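
The change consolidates the hardcoded model string into a single module-level _MODEL constant (switching the default from mixtral-8x7b-instruct to llama-v3p1-8b-instruct), so a future model swap touches one line instead of nine. A minimal sketch of a further step this pattern permits, not part of this diff: sourcing the constant from an environment variable so the suite can be pointed at a different Fireworks model without editing the file. The FIREWORKS_TEST_MODEL variable name below is an assumption for illustration, not something these tests actually read.

import os

# Hypothetical override hook: fall back to the diff's default model when the
# (illustrative) FIREWORKS_TEST_MODEL environment variable is unset.
_MODEL = os.environ.get(
    "FIREWORKS_TEST_MODEL",
    "accounts/fireworks/models/llama-v3p1-8b-instruct",
)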