# Contextual AI Python API library

- [![PyPI version](https://img.shields.io/pypi/v/contextual-sdk.svg)](https://pypi.org/project/contextual-sdk/)
+ [![PyPI version](https://img.shields.io/pypi/v/contextual-client.svg)](https://pypi.org/project/contextual-client/)

The Contextual AI Python library provides convenient access to the Contextual AI REST API from any Python 3.8+
application. The library includes type definitions for all request params and response fields,
@@ -20,7 +20,7 @@ pip install git+ssh://git@github.com/stainless-sdks/sunrise-python.git
```

> [!NOTE]
- > Once this package is [published to PyPI](https://app.stainlessapi.com/docs/guides/publish), this will become: `pip install --pre contextual-sdk`
+ > Once this package is [published to PyPI](https://app.stainlessapi.com/docs/guides/publish), this will become: `pip install --pre contextual-client`

## Usage
@@ -34,10 +34,10 @@ client = ContextualAI(
    api_key=os.environ.get("CONTEXTUAL_API_KEY"),  # This is the default and can be omitted
)

- create_datastore_response = client.datastores.create(
-     name="name",
+ create_application_output = client.applications.create(
+     name="xxx",
)
- print(create_datastore_response.id)
+ print(create_application_output.application_id)
```

While you can provide an `api_key` keyword argument,
@@ -60,10 +60,10 @@ client = AsyncContextualAI(
async def main() -> None:
-     create_datastore_response = await client.datastores.create(
-         name="name",
+     create_application_output = await client.applications.create(
+         name="xxx",
    )
-     print(create_datastore_response.id)
+     print(create_application_output.application_id)


asyncio.run(main())
@@ -91,12 +91,12 @@ from contextual import ContextualAI
client = ContextualAI()

- all_datastores = []
+ all_applications = []
# Automatically fetches more pages as needed.
- for datastore in client.datastores.list():
-     # Do something with datastore here
-     all_datastores.append(datastore)
- print(all_datastores)
+ for application in client.applications.list():
+     # Do something with application here
+     all_applications.append(application)
+ print(all_applications)
```
Or, asynchronously:
@@ -109,11 +109,11 @@ client = AsyncContextualAI()
async def main() -> None:
-     all_datastores = []
+     all_applications = []
    # Iterate through items across all pages, issuing requests as needed.
-     async for datastore in client.datastores.list():
-         all_datastores.append(datastore)
-     print(all_datastores)
+     async for application in client.applications.list():
+         all_applications.append(application)
+     print(all_applications)


asyncio.run(main())
@@ -122,23 +122,23 @@ asyncio.run(main())
Alternatively, you can use the `.has_next_page()`, `.next_page_info()`, or `.get_next_page()` methods for more granular control when working with pages:

```python
- first_page = await client.datastores.list()
+ first_page = await client.applications.list()
if first_page.has_next_page():
    print(f"will fetch next page using these details: {first_page.next_page_info()}")
    next_page = await first_page.get_next_page()
-     print(f"number of items we just fetched: {len(next_page.datastores)}")
+     print(f"number of items we just fetched: {len(next_page.applications)}")

# Remove `await` for non-async usage.
```
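These helpers also support walking pages by hand. A minimal sketch (illustrative only, not part of this diff), assuming the same renamed `applications.list()` pager and the `applications` field shown on each page:

```python
import asyncio

from contextual import AsyncContextualAI

client = AsyncContextualAI()


async def main() -> None:
    # Collect every application by fetching pages explicitly.
    collected = []
    page = await client.applications.list()
    while True:
        collected.extend(page.applications)
        if not page.has_next_page():
            break
        page = await page.get_next_page()
    print(len(collected))


asyncio.run(main())
```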
Or just work directly with the returned data:
```python
- first_page = await client.datastores.list()
+ first_page = await client.applications.list()

print(f"next page cursor: {first_page.next_cursor}")  # => "next page cursor: ..."
- for datastore in first_page.datastores:
-     print(datastore.id)
+ for application in first_page.applications:
+     print(application.id)

# Remove `await` for non-async usage.
```
@@ -159,8 +159,8 @@ from contextual import ContextualAI
client = ContextualAI()

try:
-     client.datastores.create(
-         name="name",
+     client.applications.create(
+         name="xxx",
    )
except contextual.APIConnectionError as e:
    print("The server could not be reached")
@@ -204,8 +204,8 @@ client = ContextualAI(
)

# Or, configure per-request:
- client.with_options(max_retries=5).datastores.create(
-     name="name",
+ client.with_options(max_retries=5).applications.create(
+     name="xxx",
)
```
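The hunk header points at the client-level retry configuration just above these lines, which the rename does not touch. A sketch of that setup (illustrative; it assumes the constructor accepts a `max_retries` argument, mirroring the per-request override shown above):

```python
from contextual import ContextualAI

# Configure the retry default for all requests made through this client.
client = ContextualAI(
    max_retries=0,  # e.g. disable automatic retries for this client
)
```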
@@ -229,8 +229,8 @@ client = ContextualAI(
)

# Override per-request:
- client.with_options(timeout=5.0).datastores.create(
-     name="name",
+ client.with_options(timeout=5.0).applications.create(
+     name="xxx",
)
```
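As with retries, the client-wide timeout configuration sits just above this hunk and is unchanged. A sketch of it (illustrative; it assumes the constructor accepts a `timeout` in seconds or an `httpx.Timeout`, mirroring the per-request override above):

```python
import httpx

from contextual import ContextualAI

# 20-second timeout for every request made through this client,
# with a tighter connect timeout.
client = ContextualAI(
    timeout=httpx.Timeout(20.0, connect=5.0),
)
```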
@@ -272,13 +272,13 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
from contextual import ContextualAI

client = ContextualAI()
- response = client.datastores.with_raw_response.create(
-     name="name",
+ response = client.applications.with_raw_response.create(
+     name="xxx",
)
print(response.headers.get('X-My-Header'))

- datastore = response.parse()  # get the object that `datastores.create()` would have returned
- print(datastore.id)
+ application = response.parse()  # get the object that `applications.create()` would have returned
+ print(application.application_id)
```
These methods return an [`APIResponse`](https://github.com/stainless-sdks/sunrise-python/tree/main/src/contextual/_response.py) object.
@@ -292,8 +292,8 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.

```python
- with client.datastores.with_streaming_response.create(
-     name="name",
+ with client.applications.with_streaming_response.create(
+     name="xxx",
) as response:
    print(response.headers.get("X-My-Header"))
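The hunk stops before the block's closing fence; the remaining lines simply consume the body with one of the read methods listed above. An illustrative continuation (a sketch, not part of this diff), using `.iter_lines()`:

```python
with client.applications.with_streaming_response.create(
    name="xxx",
) as response:
    print(response.headers.get("X-My-Header"))

    # Stream the body line by line instead of reading it all at once.
    for line in response.iter_lines():
        print(line)
```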