Merged
26 commits
2555d5b
Use aicore serving executable names
marcorosa Feb 18, 2025
7cd1f9e
Support aicore-mistralai models
marcorosa Feb 18, 2025
0339f56
Use correct serving executable name for openai
marcorosa Feb 18, 2025
23c9e6a
Fix linter error
marcorosa Feb 18, 2025
99bb0f9
Merge pull request #34 from SAP/fix/mistralai
marcorosa Feb 18, 2025
6f5ab57
Update packages
marcorosa Mar 3, 2025
047bcad
Use non-standalone components
marcorosa Mar 3, 2025
341d03b
Remove dev environment placeholders
marcorosa Mar 3, 2025
c9a2747
Merge pull request #36 from SAP/fix/frontend
marcorosa Mar 3, 2025
fa36810
Support remote ollama models
marcorosa Mar 18, 2025
550b5f1
update ollama version
marcorosa Mar 18, 2025
cb54ac3
set generate params default values to avoid runtime exceptions
marcorosa Mar 20, 2025
8b476b1
Reuse Version update from dep to toml
ajinkyapatil8190 Mar 24, 2025
28691ef
Merge pull request #39 from SAP/ollama-remote
marcorosa Mar 25, 2025
b9f987a
Merge pull request #40 from SAP/Reuse-Migration-TOML-Branch
marcorosa Mar 25, 2025
5c45b77
Fix remote ollama models discovery
marcorosa Apr 8, 2025
63003e1
Bump flask-cors from 5.0.0 to 6.0.0 in /backend-agent
dependabot[bot] May 17, 2025
b2ba949
Fix js linter
marcorosa May 27, 2025
dd4c034
Merge pull request #48 from SAP/fix/frontend-linter
marcorosa May 27, 2025
72f63f1
Merge pull request #47 from SAP/dependabot/pip/backend-agent/flask-co…
marcorosa May 27, 2025
d733842
Bump prismjs from 1.29.0 to 1.30.0 in /frontend
dependabot[bot] May 27, 2025
2b97748
Merge branch 'develop' into dependabot/npm_and_yarn/frontend/prismjs-…
marcorosa May 27, 2025
4c2d6fd
Update package.json
marcorosa May 27, 2025
2f0568d
Merge branch 'develop' into dependabot/npm_and_yarn/frontend/prismjs-…
marcorosa May 27, 2025
28c5a5b
Merge pull request #37 from SAP/dependabot/npm_and_yarn/frontend/pris…
marcorosa May 27, 2025
dbce5b1
[Changelog CI] Add Changelog for Version v0.2.1
github-actions[bot] May 27, 2025
10 changes: 6 additions & 4 deletions .github/workflows/lint-frontend.yml
@@ -35,8 +35,10 @@ jobs:
npm ci

- name: Run linters
uses: wearerequired/lint-action@v2
uses: reviewdog/action-eslint@v1
with:
eslint: true
eslint_dir: frontend
eslint_extensions: js,ts
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-review
eslint_flags: 'frontend/'
fail_on_error: true
level: warning
50 changes: 0 additions & 50 deletions .reuse/dep5

This file was deleted.

11 changes: 11 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,14 @@
# Version: v0.2.1

* [#34](https://github.com/SAP/STARS/pull/34): Support aicore-mistralai models
* [#36](https://github.com/SAP/STARS/pull/36): Fix/frontend
* [#37](https://github.com/SAP/STARS/pull/37): Bump prismjs from 1.29.0 to 1.30.0 in /frontend
* [#39](https://github.com/SAP/STARS/pull/39): Support remote ollama models
* [#40](https://github.com/SAP/STARS/pull/40): [chore]:Reuse API update - Migration from dep5 file to TOML file.
* [#47](https://github.com/SAP/STARS/pull/47): Bump flask-cors from 5.0.0 to 6.0.0 in /backend-agent
* [#48](https://github.com/SAP/STARS/pull/48): Fix js linter action


# Version: v0.2.0

* [#25](https://github.com/SAP/STARS/pull/25): Bump katex from 0.16.10 to 0.16.21 in /frontend
32 changes: 32 additions & 0 deletions REUSE.toml
@@ -0,0 +1,32 @@
version = 1
SPDX-PackageName = "Smart Threat AI Reporting Scanner (STARS)"
SPDX-PackageSupplier = "ospo@sap.com"
SPDX-PackageDownloadLocation = "https://github.com/SAP/stars"
SPDX-PackageComment = "The code in this project may include calls to APIs (\"API Calls\") of\n SAP or third-party products or services developed outside of this project\n (\"External Products\").\n \"APIs\" means application programming interfaces, as well as their respective\n specifications and implementing code that allows software to communicate with\n other software.\n API Calls to External Products are not licensed under the open source license\n that governs this project. The use of such API Calls and related External\n Products are subject to applicable additional agreements with the relevant\n provider of the External Products. In no event shall the open source license\n that governs this project grant any rights in or to any External Products,or\n alter, expand or supersede any terms of the applicable additional agreements.\n If you have a valid license agreement with SAP for the use of a particular SAP\n External Product, then you may make use of any API Calls included in this\n project's code for that SAP External Product, subject to the terms of such\n license agreement. If you do not have a valid license agreement for the use of\n a particular SAP External Product, then you may only make use of any API Calls\n in this project for that SAP External Product for your internal, non-productive\n and non-commercial test and evaluation of such API Calls. Nothing herein grants\n you any rights to use or access any SAP External Product, or provide any third\n parties the right to use of access any SAP External Product, through API Calls."

[[annotations]]
path = "**"
precedence = "aggregate"
SPDX-FileCopyrightText = "2024 SAP SE or an SAP affiliate company and stars contributors"
SPDX-License-Identifier = "Apache-2.0"

[[annotations]]
path = "backend-agent/cli.py"
precedence = "aggregate"
SPDX-FileCopyrightText = ["", "2024 SAP SE or an SAP affiliate company and STARS contributors", "2021 Sandflow Consulting LLC"]
SPDX-License-Identifier = "Apache-2.0 and BSD-3-Clause"
SPDX-FileComment = "these files contain content from SAP and ttconv: cli.py is overall written by SAP, but contains a code snippet from src/main/python/ttconv/tt.py file taken from ttconv"

[[annotations]]
path = "backend-agent/libs/artprompt.py"
precedence = "aggregate"
SPDX-FileCopyrightText = ["", "2024 SAP SE or an SAP affiliate company and STARS contributors", "2024 UW-NSL"]
SPDX-License-Identifier = "Apache-2.0 and MIT"
SPDX-FileComment = "these files contain content from SAP and UW-NSL: the original content was written by UW-NSL, but it was refactored, simplified, and modified by SAP to be more suitable to this project"

[[annotations]]
path = "frontend/src/app/app.component.spec.ts"
precedence = "aggregate"
SPDX-FileCopyrightText = ["", "2010-2020 Google LLC. https://angular.io/license"]
SPDX-License-Identifier = "MIT"
SPDX-FileComment = "these files contain content from Angular 11.1.0-next.1 (it contains a code snippet from integration/trusted-types/src/app/app.component.spec.ts file taken from angular)"
126 changes: 78 additions & 48 deletions backend-agent/llm.py
@@ -23,35 +23,21 @@
logger.addHandler(status.trace_logging)

AICORE_MODELS = {
'openai':
'aicore-ibm':
[
'gpt-35-turbo',
'gpt-35-turbo-0125',
'gpt-35-turbo-16k',
'gpt-4',
'gpt-4-32k',
'gpt-4o',
'gpt-4o-mini'
'ibm--granite-13b-chat'
],
'aicore-mistralai':
[
'mistralai--mistral-large-instruct',
],
'opensource':
'aicore-opensource':
[
'mistralai--mixtral-8x7b-instruct-v01',
'meta--llama3.1-70b-instruct',
'meta--llama3-70b-instruct'
],
'vertexai':
[
'text-bison',
'chat-bison',
'gemini-1.0-pro',
'gemini-1.5-pro',
'gemini-1.5-flash'
],
'ibm':
[
'ibm--granite-13b-chat'
],
'bedrock':
'aws-bedrock':
[
'amazon--titan-text-lite',
'amazon--titan-text-express',
@@ -62,7 +48,25 @@
'amazon--nova-pro',
'amazon--nova-lite',
'amazon--nova-micro'
]
],
'azure-openai':
[
'gpt-35-turbo',
'gpt-35-turbo-0125',
'gpt-35-turbo-16k',
'gpt-4',
'gpt-4-32k',
'gpt-4o',
'gpt-4o-mini'
],
'gcp-vertexai':
[
'text-bison',
'chat-bison',
'gemini-1.0-pro',
'gemini-1.5-pro',
'gemini-1.5-flash'
],
}
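
As an illustrative aside, the sketch below shows how the renamed catalog keys could be checked after this change; it assumes backend-agent is on PYTHONPATH so that llm.py imports as `llm`, and the entries asserted are taken from the diff above.

# Minimal sketch (assumption: backend-agent is on PYTHONPATH so llm.py imports as `llm`).
from llm import AICORE_MODELS

# The keys now mirror the AI Core serving executable names used above.
assert 'gpt-4o' in AICORE_MODELS['azure-openai']
assert 'mistralai--mistral-large-instruct' in AICORE_MODELS['aicore-mistralai']
assert 'amazon--nova-pro' in AICORE_MODELS['aws-bedrock']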


@@ -79,25 +83,30 @@ def from_model_name(cls, model_name: str) -> 'LLM':
Create a specific LLM object from the name of the model.
Useful because the user can specify only the name in the agent.
"""
if 'gpt' in model_name:
# Foundation-models scenarios in AI Core
if model_name in AICORE_MODELS['azure-openai']:
# The agent sometimes autocorrects gpt-35-turbo to gpt-3.5-turbo,
# so we handle this behavior here.
if model_name == 'gpt-3.5-turbo':
model_name = 'gpt-35-turbo'
return AICoreOpenAILLM(model_name)
if model_name in AICORE_MODELS['opensource']:
return AICoreOpenAILLM(model_name, False)
if model_name in AICORE_MODELS['ibm']:
if model_name in AICORE_MODELS['aicore-ibm']:
# IBM models are compatible with OpenAI completion API
return AICoreOpenAILLM(model_name)
if model_name in AICORE_MODELS['vertexai']:
return AICoreGoogleVertexLLM(model_name)
if model_name in AICORE_MODELS['bedrock']:
if model_name in AICORE_MODELS['aicore-opensource']:
return AICoreOpenAILLM(model_name, False)
if model_name in AICORE_MODELS['aicore-mistralai']:
return AICoreOpenAILLM(model_name, False)
if model_name in AICORE_MODELS['aws-bedrock']:
if 'titan' in model_name:
# Titan models don't support system prompts
return AICoreAmazonBedrockLLM(model_name, False)
else:
return AICoreAmazonBedrockLLM(model_name)
if model_name in AICORE_MODELS['gcp-vertexai']:
return AICoreGoogleVertexLLM(model_name)

# Custom models
if model_name == 'mistral':
return LocalOpenAILLM(
os.getenv('MISTRAL_MODEL_NAME', ''),
@@ -109,9 +118,17 @@ def from_model_name(cls, model_name: str) -> 'LLM':
# possible local ollama instance. If it is not served there either, an
# exception is raised because the model has either an incorrect name
# or has not been deployed.
ollama_host = os.getenv('OLLAMA_HOST')
ollama_port = os.getenv('OLLAMA_PORT', 11434)
try:
ollama.show(model_name)
return OllamaLLM(model_name)
if ollama_host:
# The model is served in a remote ollama instance
remote_ollama_host = f'{ollama_host}:{ollama_port}'
return OllamaLLM(model_name, remote_ollama_host)
else:
# The model is served in a local ollama instance
ollama.show(model_name)
return OllamaLLM(model_name)
except (ollama.ResponseError, httpx.ConnectError):
raise ValueError(f'Model {model_name} not found')
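
A short usage sketch of the new remote-Ollama path follows; the endpoint, port, and model name are placeholders rather than values from this PR, and backend-agent is assumed to be on PYTHONPATH so llm.py imports as `llm`.

import os
from llm import LLM  # assumption: backend-agent is on PYTHONPATH

# Placeholder endpoint and model name -- not values taken from this PR.
os.environ['OLLAMA_HOST'] = 'http://ollama.internal.example.com'
os.environ['OLLAMA_PORT'] = '11434'

# With OLLAMA_HOST set, from_model_name returns an OllamaLLM bound to
# '<OLLAMA_HOST>:<OLLAMA_PORT>' instead of probing a local instance with
# ollama.show().
model = LLM.from_model_name('llama3.1')
print(model)  # e.g. 'llama3.1/Ollama LLM'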

@@ -127,7 +144,17 @@ def _calculate_list_of_supported_models(self) -> list[str]:
if os.getenv('MISTRAL_URL'):
models.append('mistral')
try:
ollama_models = [m['name'] for m in ollama.list()['models']]
ollama_models = []
ollama_host = os.getenv('OLLAMA_HOST')
ollama_port = os.getenv('OLLAMA_PORT', 11434)
if ollama_host:
# The model is served in a remote ollama instance
remote_ollama_host = f'{ollama_host}:{ollama_port}'
ollama_models = [m['model'] for m in
ollama.Client(remote_ollama_host).list()
['models']]
else:
ollama_models = [m['name'] for m in ollama.list()['models']]
return models + ollama_models
except httpx.ConnectError:
return models
@@ -325,28 +352,30 @@ def generate_completions_for_messages(self,


class OllamaLLM(LLM):
def __init__(self, model_name: str):
def __init__(self, model_name: str, host=None):
self.model_name = model_name
self.client = ollama.Client(host=host)

def __str__(self) -> str:
return f'{self.model_name}/Ollama LLM'

def generate(self,
system_prompt: str,
prompt: str,
temperature: float,
max_tokens: int,
n: int) -> list[str]:
max_tokens: int = 4096,
temperature: float = 0.3,
n: int = 1,) -> list[str]:
try:
messages = [
{'role': 'system', 'content': system_prompt},
{'role': 'user', 'content': prompt},
]
generations = [
ollama.generate(self.model_name,
prompt, system_prompt,
options={'temperature': temperature})
['response']
self.client.generate(model=self.model_name,
prompt=prompt,
system=system_prompt,
options={'temperature': temperature}
)['response']
for _ in range(n)]
return self._trace_llm_call(messages, Success(generations))
except Exception as e:
@@ -363,13 +392,14 @@ def generate_completions_for_messages(
n: int = 1) -> list[str]:
try:
generations = [
ollama.chat(self.model_name,
messages,
options={'temperature': temperature,
'top_p': top_p,
'frequency_penalty': frequency_penalty,
'presence_penalty': presence_penalty})
['message']['content']
self.client.chat(
self.model_name,
messages,
options={'temperature': temperature,
'top_p': top_p,
'frequency_penalty': frequency_penalty,
'presence_penalty': presence_penalty}
)['message']['content']
for _ in range(n)
]
return self._trace_llm_call(messages, Success(generations))
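
Putting the OllamaLLM changes together, here is a minimal sketch of direct usage with the new host argument and the defaulted generate parameters; the endpoint, model name, and prompts are placeholders, and llm.py is again assumed to be importable from backend-agent. Per its annotation, generate returns list[str].

from llm import OllamaLLM  # assumption: backend-agent is on PYTHONPATH

# Pass host for a remote Ollama instance; leave it as None for a local one.
ollama_llm = OllamaLLM('llama3.1', host='http://ollama.internal.example.com:11434')

# max_tokens, temperature, and n now default to 4096, 0.3, and 1, so a caller
# that only provides the prompts no longer needs to pass every parameter.
answers = ollama_llm.generate(
    system_prompt='You are a concise security assistant.',
    prompt='Summarize this scan result in one sentence.',
)
print(answers[0])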
4 changes: 2 additions & 2 deletions backend-agent/requirements.txt
@@ -3,7 +3,7 @@ Deprecated==1.2.15
python-dotenv==1.0.1
faiss-cpu~=1.9.0
Flask==2.3.3
Flask-Cors==5.0.0
Flask-Cors==6.0.0
flask_sock==0.7.0
langchain~=0.2.16
PyYAML==6.0.2
@@ -17,7 +17,7 @@ tensorflow-hub
sentence-transformers
torchfile
tensorflow-text
ollama==0.4.2
ollama==0.4.7
weasyprint
pyrit==0.2.1
textattack>=0.3.10