File size: 1,912 Bytes
04bcdd6
247d94e
85e15be
 
1c9e991
04bcdd6
 
 
 
247d94e
04bcdd6
247d94e
3843078
247d94e
 
 
 
 
 
04bcdd6
 
2aad2cc
8e11cb5
 
04bcdd6
5074c03
4a980b1
04bcdd6
 
8e11cb5
4a980b1
8e11cb5
04bcdd6
 
 
8e11cb5
 
 
 
04bcdd6
 
 
 
 
 
 
 
d66c6b7
 
247d94e
04bcdd6
247d94e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="utf-8">
    <link rel="icon" href="/favicon.ico">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta name="theme-color" content="#000000">
    <meta name="description" content="Test your model performance against adversarial attacks">
    <!-- A non-empty <title> is required for validity, SEO, and screen readers;
         the original page had it commented out. -->
    <title>Tumeryk Model AttackGuard</title>
    <link rel="apple-touch-icon" href="/logo192.png">
    <link href="/static/css/main.47bafb87.css" rel="stylesheet">
</head>

<body>
    <!-- Mount point for the React bundle (script currently not loaded on this page). -->
    <div id="root"></div>

    <!-- Deprecated <center> replaced by text-align; <br><br> spacing replaced by margins.
         rel="noopener noreferrer" guards the target="_blank" links against tabnabbing. -->
    <main style="text-align: center;">
        <a href="https://www.tumeryk.com" target="_blank" rel="noopener noreferrer">
            <img src="/static/images/tmryk-logo-small.png"
                 alt="Tumeryk home page (opens in new tab)" class="logo" width="300">
        </a>

        <p>Test your model performance against adversarial attacks</p>

        <p style="margin-top: 3em;">
            <a href="https://mag.tmryk.com" target="_blank" rel="noopener noreferrer"
               style="color: blue;">
                Login or Register for a New Account
            </a>
        </p>

        <p style="margin-top: 4em;">
            AI applications expose the business to new security attack vectors. Tumeryk secures in-house and third-party ML,
            LLM, and Gen AI models. This service helps Data Scientists to validate and protect ML models against Adversarial
            AI attacks like Evasion, Extraction, Inference, and Data Poisoning attacks that may be launched against these
            models resulting in loss of data, incorrect prediction/classification, and/or model theft.
        </p>
    </main>
</body>

</html>