[{"data":1,"prerenderedAt":1502},["ShallowReactive",2],{"/en-us/blog/tags/google/":3,"navigation-en-us":19,"banner-en-us":449,"footer-en-us":466,"google-tag-page-en-us":676},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":10,"_id":12,"_type":13,"title":14,"_source":15,"_file":16,"_stem":17,"_extension":18},"/en-us/blog/tags/google","tags",false,"",{"tag":9,"tagSlug":9},"google",{"template":11},"BlogTag","content:en-us:blog:tags:google.yml","yaml","Google","content","en-us/blog/tags/google.yml","en-us/blog/tags/google","yml",{"_path":20,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":22,"_id":445,"_type":13,"title":446,"_source":15,"_file":447,"_stem":448,"_extension":18},"/shared/en-us/main-navigation","en-us",{"logo":23,"freeTrial":28,"sales":33,"login":38,"items":43,"search":376,"minimal":407,"duo":426,"pricingDeployment":435},{"config":24},{"href":25,"dataGaName":26,"dataGaLocation":27},"/","gitlab logo","header",{"text":29,"config":30},"Get free trial",{"href":31,"dataGaName":32,"dataGaLocation":27},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":34,"config":35},"Talk to sales",{"href":36,"dataGaName":37,"dataGaLocation":27},"/sales/","sales",{"text":39,"config":40},"Sign in",{"href":41,"dataGaName":42,"dataGaLocation":27},"https://gitlab.com/users/sign_in/","sign in",[44,88,186,191,297,357],{"text":45,"config":46,"cards":48,"footer":71},"Platform",{"dataNavLevelOne":47},"platform",[49,55,63],{"title":45,"description":50,"link":51},"The most comprehensive AI-powered DevSecOps Platform",{"text":52,"config":53},"Explore our Platform",{"href":54,"dataGaName":47,"dataGaLocation":27},"/platform/",{"title":56,"description":57,"link":58},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":59,"config":60},"Meet GitLab Duo",{"href":61,"dataGaName":62,"dataGaLocation":27},"/gitlab-duo/","gitlab duo 
ai",{"title":64,"description":65,"link":66},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":67,"config":68},"Learn more",{"href":69,"dataGaName":70,"dataGaLocation":27},"/why-gitlab/","why gitlab",{"title":72,"items":73},"Get started with",[74,79,84],{"text":75,"config":76},"Platform Engineering",{"href":77,"dataGaName":78,"dataGaLocation":27},"/solutions/platform-engineering/","platform engineering",{"text":80,"config":81},"Developer Experience",{"href":82,"dataGaName":83,"dataGaLocation":27},"/developer-experience/","Developer experience",{"text":85,"config":86},"MLOps",{"href":87,"dataGaName":85,"dataGaLocation":27},"/topics/devops/the-role-of-ai-in-devops/",{"text":89,"left":90,"config":91,"link":93,"lists":97,"footer":168},"Product",true,{"dataNavLevelOne":92},"solutions",{"text":94,"config":95},"View all Solutions",{"href":96,"dataGaName":92,"dataGaLocation":27},"/solutions/",[98,123,147],{"title":99,"description":100,"link":101,"items":106},"Automation","CI/CD and automation to accelerate deployment",{"config":102},{"icon":103,"href":104,"dataGaName":105,"dataGaLocation":27},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[107,111,115,119],{"text":108,"config":109},"CI/CD",{"href":110,"dataGaLocation":27,"dataGaName":108},"/solutions/continuous-integration/",{"text":112,"config":113},"AI-Assisted Development",{"href":61,"dataGaLocation":27,"dataGaName":114},"AI assisted development",{"text":116,"config":117},"Source Code Management",{"href":118,"dataGaLocation":27,"dataGaName":116},"/solutions/source-code-management/",{"text":120,"config":121},"Automated Software Delivery",{"href":104,"dataGaLocation":27,"dataGaName":122},"Automated software delivery",{"title":124,"description":125,"link":126,"items":131},"Security","Deliver code faster without compromising security",{"config":127},{"href":128,"dataGaName":129,"dataGaLocation":27,"icon":130},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[132,137,142],{"text":133,"config":134},"Application Security Testing",{"href":135,"dataGaName":136,"dataGaLocation":27},"/solutions/application-security-testing/","Application security testing",{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":27,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Software Compliance",{"href":145,"dataGaName":146,"dataGaLocation":27},"/solutions/software-compliance/","software compliance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":27},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":27,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":27,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":27,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":27,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":27,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":27,"dataGaName":185},"/solutions/public-sector/","public sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":27,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all 
resources",{"href":198,"dataGaName":194,"dataGaLocation":27},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":27},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":27},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":27,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":27},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":27},"/getting-started-videos/","best practice videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":27},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success stories",{"href":239,"dataGaName":240,"dataGaLocation":27},"/customers/","customer success stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":27},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":27},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":27},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab 
Services",{"href":262,"dataGaName":263,"dataGaLocation":27},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":27},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":27},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":27},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":27},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":27},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":27},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":27},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":27},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":27},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":27},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":27},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":27},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust 
Center",{"href":340,"dataGaName":341,"dataGaLocation":27},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":27},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":27},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":27},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":34,"config":364},{"href":36,"dataGaName":365,"dataGaLocation":27},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":27},"/support/","get help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":27},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login to",{"text":381,"config":382},"gitlab.com",{"href":41,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":56,"config":389},{"href":61,"dataGaName":56,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":108,"config":395},{"href":110,"dataGaName":108,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why 
GitLab?",{"href":69,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":32,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":61,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},{"freeTrial":436,"mobileIcon":441,"desktopIcon":443},{"text":437,"config":438},"Back to pricing",{"href":189,"dataGaName":439,"dataGaLocation":412,"icon":440},"back to pricing","GoBack",{"altText":414,"config":442},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":444},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":450,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"title":451,"button":452,"image":457,"config":461,"_id":463,"_type":13,"_source":15,"_file":464,"_stem":465,"_extension":18},"/shared/en-us/banner","is now in public beta!",{"text":453,"config":454},"Try the 
Beta",{"href":455,"dataGaName":456,"dataGaLocation":27},"/gitlab-duo/agent-platform/","duo banner",{"altText":458,"config":459},"GitLab Duo Agent Platform",{"src":460},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":462},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":467,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":468,"_id":672,"_type":13,"title":673,"_source":15,"_file":674,"_stem":675,"_extension":18},"/shared/en-us/main-footer",{"text":469,"source":470,"edit":476,"contribute":481,"config":486,"items":491,"minimal":664},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":471,"config":472},"View page source",{"href":473,"dataGaName":474,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":477,"config":478},"Edit this page",{"href":479,"dataGaName":480,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":482,"config":483},"Please contribute",{"href":484,"dataGaName":485,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":487,"facebook":488,"youtube":489,"linkedin":490},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[492,515,571,600,634],{"title":45,"links":493,"subMenu":498},[494],{"text":495,"config":496},"DevSecOps platform",{"href":54,"dataGaName":497,"dataGaLocation":475},"devsecops platform",[499],{"title":187,"links":500},[501,505,510],{"text":502,"config":503},"View plans",{"href":189,"dataGaName":504,"dataGaLocation":475},"view plans",{"text":506,"config":507},"Why 
Premium?",{"href":508,"dataGaName":509,"dataGaLocation":475},"/pricing/premium/","why premium",{"text":511,"config":512},"Why Ultimate?",{"href":513,"dataGaName":514,"dataGaLocation":475},"/pricing/ultimate/","why ultimate",{"title":516,"links":517},"Solutions",[518,523,525,527,532,537,541,544,548,553,555,558,561,566],{"text":519,"config":520},"Digital transformation",{"href":521,"dataGaName":522,"dataGaLocation":475},"/topics/digital-transformation/","digital transformation",{"text":133,"config":524},{"href":135,"dataGaName":133,"dataGaLocation":475},{"text":122,"config":526},{"href":104,"dataGaName":105,"dataGaLocation":475},{"text":528,"config":529},"Agile development",{"href":530,"dataGaName":531,"dataGaLocation":475},"/solutions/agile-delivery/","agile delivery",{"text":533,"config":534},"Cloud transformation",{"href":535,"dataGaName":536,"dataGaLocation":475},"/topics/cloud-native/","cloud transformation",{"text":538,"config":539},"SCM",{"href":118,"dataGaName":540,"dataGaLocation":475},"source code management",{"text":108,"config":542},{"href":110,"dataGaName":543,"dataGaLocation":475},"continuous integration & delivery",{"text":545,"config":546},"Value stream management",{"href":162,"dataGaName":547,"dataGaLocation":475},"value stream management",{"text":549,"config":550},"GitOps",{"href":551,"dataGaName":552,"dataGaLocation":475},"/solutions/gitops/","gitops",{"text":172,"config":554},{"href":174,"dataGaName":175,"dataGaLocation":475},{"text":556,"config":557},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":475},{"text":559,"config":560},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":475},{"text":562,"config":563},"Education",{"href":564,"dataGaName":565,"dataGaLocation":475},"/solutions/education/","education",{"text":567,"config":568},"Financial services",{"href":569,"dataGaName":570,"dataGaLocation":475},"/solutions/finance/","financial 
services",{"title":192,"links":572},[573,575,577,579,582,584,586,588,590,592,594,596,598],{"text":204,"config":574},{"href":206,"dataGaName":207,"dataGaLocation":475},{"text":209,"config":576},{"href":211,"dataGaName":212,"dataGaLocation":475},{"text":214,"config":578},{"href":216,"dataGaName":217,"dataGaLocation":475},{"text":219,"config":580},{"href":221,"dataGaName":581,"dataGaLocation":475},"docs",{"text":242,"config":583},{"href":244,"dataGaName":245,"dataGaLocation":475},{"text":237,"config":585},{"href":239,"dataGaName":240,"dataGaLocation":475},{"text":247,"config":587},{"href":249,"dataGaName":250,"dataGaLocation":475},{"text":260,"config":589},{"href":262,"dataGaName":263,"dataGaLocation":475},{"text":252,"config":591},{"href":254,"dataGaName":255,"dataGaLocation":475},{"text":265,"config":593},{"href":267,"dataGaName":268,"dataGaLocation":475},{"text":270,"config":595},{"href":272,"dataGaName":273,"dataGaLocation":475},{"text":275,"config":597},{"href":277,"dataGaName":278,"dataGaLocation":475},{"text":280,"config":599},{"href":282,"dataGaName":283,"dataGaLocation":475},{"title":298,"links":601},[602,604,606,608,610,612,614,618,623,625,627,629],{"text":305,"config":603},{"href":307,"dataGaName":300,"dataGaLocation":475},{"text":310,"config":605},{"href":312,"dataGaName":313,"dataGaLocation":475},{"text":318,"config":607},{"href":320,"dataGaName":321,"dataGaLocation":475},{"text":323,"config":609},{"href":325,"dataGaName":326,"dataGaLocation":475},{"text":328,"config":611},{"href":330,"dataGaName":331,"dataGaLocation":475},{"text":333,"config":613},{"href":335,"dataGaName":336,"dataGaLocation":475},{"text":615,"config":616},"Sustainability",{"href":617,"dataGaName":615,"dataGaLocation":475},"/sustainability/",{"text":619,"config":620},"Diversity, inclusion and belonging (DIB)",{"href":621,"dataGaName":622,"dataGaLocation":475},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":338,"config":624},{"href":340,"dataGaName":341,"dataGaLocation":475},{"text":348,"config":626},{"href":350,"dataGaName":351,"dataGaLocation":475},{"text":353,"config":628},{"href":355,"dataGaName":356,"dataGaLocation":475},{"text":630,"config":631},"Modern Slavery Transparency Statement",{"href":632,"dataGaName":633,"dataGaLocation":475},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":635,"links":636},"Contact Us",[637,640,642,644,649,654,659],{"text":638,"config":639},"Contact an expert",{"href":36,"dataGaName":37,"dataGaLocation":475},{"text":367,"config":641},{"href":369,"dataGaName":370,"dataGaLocation":475},{"text":372,"config":643},{"href":374,"dataGaName":375,"dataGaLocation":475},{"text":645,"config":646},"Status",{"href":647,"dataGaName":648,"dataGaLocation":475},"https://status.gitlab.com/","status",{"text":650,"config":651},"Terms of use",{"href":652,"dataGaName":653,"dataGaLocation":475},"/terms/","terms of use",{"text":655,"config":656},"Privacy statement",{"href":657,"dataGaName":658,"dataGaLocation":475},"/privacy/","privacy statement",{"text":660,"config":661},"Cookie preferences",{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"cookie preferences","ot-sdk-btn",{"items":665},[666,668,670],{"text":650,"config":667},{"href":652,"dataGaName":653,"dataGaLocation":475},{"text":655,"config":669},{"href":657,"dataGaName":658,"dataGaLocation":475},{"text":660,"config":671},{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":677,"featuredPost":1474,"totalPagesCount":1500,"initialPosts":1501},[678,705,734,756,775,796,818,839,862,881,903,922,942,961,981,1000,1018,1037,1055,1074,1094,1115,1135,1156,1175,1195,1218,1238,1258,1278,1299,1318,1338,1356,1375,1393,1415,1435,1455],{"_path":679,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":680,"content":689,"config":698,"_id":701,"_type":13,"title":702,"_source":15,"_file":703,"_stem":704,"_extension":18},"/en-us/blog/android-publishing-with-gitlab-and-fastlane",{"title":681,"description":682,"ogTitle":683,"ogDescription":682,"noIndex":6,"ogImage":684,"ogUrl":685,"ogSiteName":686,"ogType":687,"canonicalUrls":685,"schema":688},"Publishing Android apps to Play Store with GitLab & fastlane","See how GitLab, together with fastlane, can build, sign, and publish apps for Android to the Google Play Store.","HPublishing Android apps to Play Store with GitLab & fastlane","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679918/Blog/Hero%20Images/android-fastlane-pipeline.png","https://about.gitlab.com/blog/android-publishing-with-gitlab-and-fastlane","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish Android apps to the Google Play Store with GitLab and fastlane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-01-28\",\n      }",{"title":690,"description":682,"authors":691,"heroImage":684,"date":693,"body":694,"category":695,"tags":696},"How to publish Android apps to the Google Play Store with GitLab and fastlane",[692],"Jason Yavorska","2019-01-28","When we heard about [_fastlane_](https://fastlane.tools), an app automation\ntool for delivering iOS and Android builds, we wanted to give it a spin to\nsee if a combination of GitLab and _fastlane_ could help us 
bring our mobile\nbuild and deployment automation to the next level and make mobile\ndevelopment a bit easier. You can see an [actual production\ndeployment](https://gitlab.com/gitlab-org/gitter/gitter-android-app/pipelines/40768761)\nof the [Gitter Android\napp](https://gitlab.com/gitlab-org/gitter/gitter-android-app) that uses what\nwe'll be implementing in this blog post; suffice to say, the results were\nfantastic and we've become big believers that the combination of GitLab and\n_fastlane_ is a truly game-changing way for developers to [enable\nCI/CD](/topics/ci-cd/) (continuous integration and continuous delivery) for\ntheir mobile applications. With GitLab and _fastlane_ we're getting, with\nminimal effort:\n\n\n- Source control, project home, issue tracking, and everything else that\ncomes with GitLab.\n\n- Content and images (metadata) for Google Play Store listing managed in\nsource control.\n\n- Automatic signing, version numbers, and changelog.\n\n- Automatic publishing to `internal` distribution channel in Google Play\nStore.\n\n- Manual promotion through `alpha`, `beta`, and `production` channels.\n\n- Containerized build environment, available in GitLab's container registry.\n\n\nIf you'd like to jump ahead and see the finished product, you can take a\nlook at the already-completed Gitter for Android\n[.gitlab-ci.yml](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/.gitlab-ci.yml),\n[build.gradle](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/app/build.gradle),\n[Dockerfile](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/Dockerfile),\nand [_fastlane_\nconfiguration](https://gitlab.com/gitlab-org/gitter/gitter-android-app/tree/master/fastlane).\n\n\n## Configuring _fastlane_\n\n\nWe'll begin first by setting up _fastlane_ in our project, make a couple key\nchanges to our Gradle configuration, and then wrap everything up in a GitLab\npipeline.\n\n\n_fastlane_ has pretty 
good\n[documentation](https://docs.fastlane.tools/getting-started/android/setup/)\nto get you started, and if you run into platform-specific trouble it's the\nfirst place to check, but to get under way you really just need to complete\na few straightforward steps.\n\n\n### Initializing your project\n\n\nFirst up, you need to get _fastlane_ installed locally and initialize your\nproduct. We're using the Ruby `fastlane` gem so you'll need Ruby on your\nsystem for this to work. You can read about [other install options in the\n_fastlane_\ndocumentation](https://docs.fastlane.tools/getting-started/android/setup/).\n\n\n``` ruby\n\nsource \"https://rubygems.org\"\n\n\ngem \"fastlane\"\n\n```\n\n\nOnce your Gemfile is updated, you can run `bundle update` to update/generate\nyour `Gemfile.lock`. From this point you can run _fastlane_ by typing\n`bundle exec fastlane`. Later, you'll see that in CI/CD we use `bundle\ninstall ...` to ensure the command runs within the context of our project\nenvironment.\n\n\nNow that we have _fastlane_ ready to run, we just need to initialize our\nrepo with our configuration. Run `bundle exec fastlane init` from within\nyour project directory, answer a few questions, and _fastlane_ will create a\nnew `./fastlane` directory containing its configuration.\n\n\n### Setting up _supply_\n\n\n_supply_ is a feature built into _fastlane_ which will help you manage\nscreenshots, descriptions, and other localized metadata/assets for\npublishing to the Google Play Store.\n\n\nPlease refer to these [detailed instructions for collecting the credentials\nnecessary to run\n_supply_](https://docs.fastlane.tools/getting-started/android/setup/#setting-up-supply).\n\n\nOnce you've set this up, simply run `bundle exec fastlane supply init` and\nall your current metadata will be downloaded from your store listing and\nsaved in `fastlane/metadata/android`. 
From this point you're able to manage\nall of your store content as-code; when we publish a new version to the\nstore later, the versions of content checked into your source repo will be\nused to populate the entry.\n\n\n### Appfile\n\n\nThe `./fastlane/Appfile` is pretty straightforward, and contains basic\nconfiguration you chose when you initialized your project. Later we'll see\nhow to inject the `json_key_file` in your CI/CD pipeline at runtime.\n\n\n`./fastlane/Appfile`\n\n``` yaml\n\njson_key_file(\"~/google_play_api_key.json\") # Path to the json secret file -\nFollow https://docs.fastlane.tools/actions/supply/#setup to get one\n\npackage_name(\"im.gitter.gitter\") # e.g. com.krausefx.app\n\n```\n\n\n### Fastfile\n\n\nThe `./fastlane/Fastfile` is more interesting, and contains the first\nchanges you'll see that we made for Gitter vs. the default one created when\nyou run `bundle exec fastlane init`.\n\n\nThe first section contains our definitions for how we want to run builds and\ntests. As you can see, this is pretty straightforward and builds right on\ntop of your already set up Gradle tasks.\n\n\n`./fastlane/Fastfile`\n\n``` yaml\n\ndefault_platform(:android)\n\n\nplatform :android do\n\n  desc \"Builds the debug code\"\n  lane :buildDebug do\n    gradle(task: \"assembleDebug\")\n  end\n\n  desc \"Builds the release code\"\n  lane :buildRelease do\n    gradle(task: \"assembleRelease\")\n  end\n\n  desc \"Runs all the tests\"\n  lane :test do\n    gradle(task: \"test\")\n  end\n\n...\n\n```\n\n\nCreating Gradle tasks that publish/promote builds can be complicated and\nerror prone, but _fastlane_ makes this much easier by giving you pre-built\ncommands (called _fastlane_ actions) that let you perform complex tasks with\njust a few simple actions.\n\n\nIn our example, we've set up a workflow where a new build can be published\nto the internal track and then optionally promoted through alpha, beta, and\nultimately production. 
We initially had a new build for each track but it's\nsafer to have the same/known build go through the whole process.\n\n\n``` yaml\n\n...\n\n  desc \"Submit a new Internal Build to Play Store\"\n  lane :internal do\n    upload_to_play_store(track: 'internal', apk: 'app/build/outputs/apk/release/app-release.apk')\n  end\n\n  desc \"Promote Internal to Alpha\"\n  lane :promote_internal_to_alpha do\n    upload_to_play_store(track: 'internal', track_promote_to: 'alpha')\n  end\n\n  desc \"Promote Alpha to Beta\"\n  lane :promote_alpha_to_beta do\n    upload_to_play_store(track: 'alpha', track_promote_to: 'beta')\n  end\n\n  desc \"Promote Beta to Production\"\n  lane :promote_beta_to_production do\n    upload_to_play_store(track: 'beta', track_promote_to: 'production')\n  end\nend\n\n```\n\n\nAn important note is that we've only scratched the surface of the kinds of\nactions that _fastlane_ can automate. You can [read more about available\nactions here](https://docs.fastlane.tools/actions/), and it's even possible\nto create your own.\n\n\n## Gradle configuration\n\n\nWe also made a couple of key changes to our basic Gradle configuration to\nmake publishing easier. Nothing major here, but it does help us make things\nrun a little more smoothly.\n\n\n### Secret properties\n\n\nThe first changed section gathers the secret variables to be used for\nsigning. 
These are either loaded via configuration file, or gathered from\nenvironment variables in the case of CI.\n\n\n`app/build.gradle`\n\n``` groovy\n\n// Try reading secrets from file\n\ndef secretsPropertiesFile = rootProject.file(\"secrets.properties\")\n\ndef secretProperties = new Properties()\n\n\nif (secretsPropertiesFile.exists()) {\n    secretProperties.load(new FileInputStream(secretsPropertiesFile))\n}\n\n// Otherwise read from environment variables, this happens in CI\n\nelse {\n    secretProperties.setProperty(\"oauth_client_id\", \"\\\"${System.getenv('oauth_client_id')}\\\"\")\n    secretProperties.setProperty(\"oauth_client_secret\", \"\\\"${System.getenv('oauth_client_secret')}\\\"\")\n    secretProperties.setProperty(\"oauth_redirect_uri\", \"\\\"${System.getenv('oauth_redirect_uri')}\\\"\")\n    secretProperties.setProperty(\"google_project_id\", \"\\\"${System.getenv('google_project_id') ?: \"null\"}\\\"\")\n    secretProperties.setProperty(\"signing_keystore_password\", \"${System.getenv('signing_keystore_password')}\")\n    secretProperties.setProperty(\"signing_key_password\", \"${System.getenv('signing_key_password')}\")\n    secretProperties.setProperty(\"signing_key_alias\", \"${System.getenv('signing_key_alias')}\")\n}\n\n```\n\n\n### Automatic versioning\n\n\nWe also set up automatic versioning using environment variables\n`VERSION_CODE`, `VERSION_SHA`, which we will set up later in CI/CD (locally\nthey will just be `null` which is fine). 
Because each build's `versionCode`\nthat you submit to the Google Play Store needs to be higher than the last,\nthis makes it simple to deal with.\n\n\n`app/build.gradle`\n\n``` groovy\n\nandroid {\n    defaultConfig {\n        applicationId \"im.gitter.gitter\"\n        minSdkVersion 19\n        targetSdkVersion 26\n        versionCode Integer.valueOf(System.env.VERSION_CODE ?: 0)\n        // Manually bump the semver version part of the string as necessary\n        versionName \"3.2.0-${System.env.VERSION_SHA}\"\n```\n\n\n### Signing configuration\n\n\nFinally, we inject the signing configuration which will automatically be\nused by Gradle to sign the release build. Depending on your configuration,\nyou may already be doing this. We only worry about signing in the release\nbuild that would potentially be published to the Google Play Store.\n\n\n> When using App Signing by Google Play, you will use two keys: the app\nsigning key and the upload key. You keep the upload key and use it to sign\nyour app for upload to the Google Play Store.\n\n>\n\n>\n[*https://developer.android.com/studio/publish/app-signing#google-play-app-signing*](https://developer.android.com/studio/publish/app-signing#google-play-app-signing)\n\n\n> IMPORTANT: Google will not re-sign any of your existing or new APKs that\nare signed with the app signing key. 
This enables you to start testing your\napp bundle in the internal test, alpha, or beta tracks while you continue to\nrelease your existing APK in production without Google Play changing it.\n\n>\n\n>\n*`https://play.google.com/apps/publish/?account=xxx#KeyManagementPlace:p=im.gitter.gitter&appid=xxx`*\n\n\n`app/build.gradle`\n\n``` groovy\n    signingConfigs {\n        release {\n            // You need to specify either an absolute path or include the\n            // keystore file in the same directory as the build.gradle file.\n            storeFile file(\"../android-signing-keystore.jks\")\n            storePassword \"${secretProperties['signing_keystore_password']}\"\n            keyAlias \"${secretProperties['signing_key_alias']}\"\n            keyPassword \"${secretProperties['signing_key_password']}\"\n        }\n    }\n    buildTypes {\n        release {\n            minifyEnabled false\n            testCoverageEnabled false\n            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'\n            signingConfig signingConfigs.release\n        }\n    }\n}\n\n```\n\n\n## Setting up the Docker build environment\n\n\nWe are building a Docker image to be used as a repeatable, consistent build\nenvironment which will speed things up because it will already have the\ndependencies downloaded and installed. 
We're just fetching a few\nprerequisites, installing the Android SDK, and then grabbing _fastlane_.\n\n\n`Dockerfile`\n\n```dockerfile\n\nFROM openjdk:8-jdk\n\n\n# Just matched `app/build.gradle`\n\nENV ANDROID_COMPILE_SDK \"26\"\n\n# Just matched `app/build.gradle`\n\nENV ANDROID_BUILD_TOOLS \"28.0.3\"\n\n# Version from https://developer.android.com/studio/releases/sdk-tools\n\nENV ANDROID_SDK_TOOLS \"24.4.1\"\n\n\nENV ANDROID_HOME /android-sdk-linux\n\nENV PATH=\"${PATH}:/android-sdk-linux/platform-tools/\"\n\n\n# install OS packages\n\nRUN apt-get --quiet update --yes\n\nRUN apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\nbuild-essential ruby ruby-dev\n\n# We use this for xxd hex->binary\n\nRUN apt-get --quiet install --yes vim-common\n\n# install Android SDK\n\nRUN wget --quiet --output-document=android-sdk.tgz\nhttps://dl.google.com/android/android-sdk_r${ANDROID_SDK_TOOLS}-linux.tgz\n\nRUN tar --extract --gzip --file=android-sdk.tgz\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter android-${ANDROID_COMPILE_SDK}\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter platform-tools\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter build-tools-${ANDROID_BUILD_TOOLS}\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter extra-android-m2repository\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter extra-google-google_play_services\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter extra-google-m2repository\n\n# install Fastlane\n\nCOPY Gemfile.lock .\n\nCOPY Gemfile .\n\nRUN gem install bundle\n\nRUN bundle install\n\n```\n\n\n## Setting up GitLab\n\n\nWith our build environment ready, let's set up our `.gitlab-ci.yml` to tie\nit all together in a CI/CD pipeline.\n\n\n### Stages\n\n\nThe first thing we do is 
define the stages that we're going to use. We'll\nset up our build environment, do our debug and release builds, run our\ntests, deploy to internal, and then promote through alpha, beta, and\nproduction. You can see that, apart from `environment`, these map to the\nlanes we set up in our `Fastfile`.\n\n\n``` yaml\n\nstages:\n  - environment\n  - build\n  - test\n  - internal\n  - alpha\n  - beta\n  - production\n```\n\n\n### Build environment update\n\n\nNext up we're going to update our build environment, if needed. If you're\nnot familiar with `.gitlab-ci.yml` it may look like there's a lot going on\nhere, but we'll take it one step at a time. The very first thing we do is\nset up an `.updateContainerJob` yaml template which can be used to capture\nshared configuration for other steps that want to use it. In this case, it\nwill be used by the subsequent `updateContainer` and `ensureContainer` jobs.\n\n\n#### `.updateContainerJob` template\n\n\nIn this case, since we're dealing with Docker in Docker (`dind`), we are\nrunning some scripts which log into the local [GitLab container\nregistry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html),\nfetch the latest image to be used as a layer cache reference, build a new\nimage, and finally push the new version to the registry.\n\n\n``` yaml\n\n.updateContainerJob:\n  image: docker:stable\n  stage: environment\n  services:\n    - docker:dind\n  script:\n    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY\n    - docker pull $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG || true\n    - docker build --cache-from $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG -t $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG .\n    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n```\n\n\n#### `updateContainer` job\n\n\nThe first job that inherits `.updateContainerJob`, `updateContainer`, only\nruns if the `Dockerfile` was updated and will run through the template steps\ndescribed above.\n\n\n``` yaml\n\nupdateContainer:\n 
 extends: .updateContainerJob\n  only:\n    changes:\n      - Dockerfile\n```\n\n\n#### `ensureContainer` job\n\n\nBecause the first pipeline on a branch can fail, the `only: changes:\nDockerfile` syntax won't trigger for a subsequent pipeline after you fix\nthings. This can leave your branch without a Docker image to use. So the\n`ensureContainer` job will look for an existing image and only build one if\nit doesn't exist. The one downside to this is that both of these jobs will\nrun at the same time if it is a new branch.\n\n\nIdeally, we could just use `$CI_REGISTRY_IMAGE:master` as a fallback when\n`$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG` isn't found but there isn't any\nsyntax for this.\n\n\n``` yaml\n\nensureContainer:\n  extends: .updateContainerJob\n  allow_failure: true\n  before_script:\n    - \"mkdir -p ~/.docker && echo '{\\\"experimental\\\": \\\"enabled\\\"}' > ~/.docker/config.json\"\n    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY\n    # Skip update container `script` if the container already exists\n    # via https://gitlab.com/gitlab-org/gitlab-ce/issues/26866#note_97609397 -> https://stackoverflow.com/a/52077071/796832\n    - docker manifest inspect $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG > /dev/null && exit || true\n```\n\n\n### Build and test\n\n\nWith our build environment ready, we're ready to build our `debug` and\n`release` targets. Similar to above, we use a template to set up repeated\nsteps within our build jobs, avoiding duplication. 
Within this section, the\nfirst thing we do is set the image to the build environment container image\nwe built in the previous step.\n\n\n#### `.build_job` template\n\n\n``` yaml\n\n.build_job:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: build\n\n...\n\n```\n\n\nNext up is a step that's specific to Gitter, but if you use shared assets\nbetween a iOS and Android build you might consider doing something similar.\nWhat we're doing here is grabbing the latest mobile artifacts built by the\nweb application pipeline and placing them in the appropriate location.\n\n\n``` yaml\n  before_script:\n    - wget --output-document=artifacts.zip --quiet \"https://gitlab.com/gitlab-org/gitter/webapp/-/jobs/artifacts/master/download?job=mobile-asset-build\"\n    - unzip artifacts.zip\n    - mkdir -p app/src/main/assets/www\n    - mv output/android/www/* app/src/main/assets/www/\n```\n\n\nNext, we use [project-level\nvariables](https://docs.gitlab.com/ee/ci/variables/) containing a binary\n(hex) dump of our signing keystore file and convert it back to a binary\nfile. This allows us to inject the file into the build at runtime instead of\nchecking it into source control, a potential security concern. To get the\n`signing_jks_file_hex` variable hex value, we use this binary -> hex\ncommand, `xxd -p gitter-android-app.jks`\n\n\n``` yaml\n    # We store this binary file in a variable as hex with this command, `xxd -p gitter-android-app.jks`\n    # Then we convert the hex back to a binary file\n    - echo \"$signing_jks_file_hex\" | xxd -r -p - > android-signing-keystore.jks\n```\n\n\nHere we're setting the version at runtime – these environment variables will\nbe used by the Gradle build as implemented above. 
Because `$CI_PIPELINE_IID`\nincrements on each pipeline, we can guarantee our `versionCode` is always\nhigher than the last and be able to publish to the Google Play Store.\n\n\n``` yaml\n    # We add 100 to get this high enough above current versionCodes that are published\n    - \"export VERSION_CODE=$((100 + $CI_PIPELINE_IID)) && echo $VERSION_CODE\"\n    - \"export VERSION_SHA=`echo ${CI_COMMIT_SHORT_SHA}` && echo $VERSION_SHA\"\n```\n\n\nNext, we automatically generate a changelog to include by copying whatever\nyou have in `CURRENT_VERSION.txt` to the current `\u003CversionCode>.text`. You\ncan update `CURRENT_VERSION.txt` as necessary. I won't dive into the details\nof the merge request (MR) creation script here since it's somewhat specific\nto Gitter, but if you're interested in how something like this might work\ncheck out the [`create-changlog-mr.sh`\nscript](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/ci-scripts/create-changlog-mr.sh).\n\n\n``` yaml\n    # Make the changelog\n    - cp ./fastlane/metadata/android/en-GB/changelogs/CURRENT_VERSION.txt \"./fastlane/metadata/android/en-GB/changelogs/$VERSION_CODE.txt\"\n    # We allow the remote push and MR creation to fail because the other job could create it\n    # and it's not strictly necessary (we just need the file locally for the CI/CD build)\n    - ./ci-scripts/create-changlog-mr.sh || true\n    # Because we allow the MR creation to fail, just make sure we are back in the right repo state\n    - git checkout \"$CI_COMMIT_SHA\"\n```\n\n\nJust a couple of final items: First, whenever a build job is done, we remove\nthe jks file just to be sure it doesn't get saved to artifacts, and second\nwe set up the artifact directory from where the output of the build (`.apk`)\nwill be saved.\n\n\n``` yaml\n  after_script:\n    - rm android-signing-keystore.jks || true\n  artifacts:\n    paths:\n    - app/build/outputs\n```\n\n\n#### `buildDebug` and `buildRelease` jobs\n\n\nMost of the 
complexity here was set up in the template, so as you can see\nour `buildDebug` and `buildRelease` job definitions are very clear. Both\njust call the appropriate _fastlane_ task (which, if you remember, then\ncalls the appropriate Gradle task). The `buildRelease` output is associated\nwith the `production` environment so we can define an extra\nproduction-scoped set of [project-level\nvariables](https://docs.gitlab.com/ee/ci/variables/) which are different\nfrom our testing variables.\n\n\nSince we set up code signing in the Gradle config (`build.gradle`) earlier,\nwe can be confident here that our `release` builds are appropriately signed\nand ready for publishing.\n\n\n```\n\nbuildDebug:\n  extends: .build_job\n  script:\n    - bundle exec fastlane buildDebug\n\nbuildRelease:\n  extends: .build_job\n  script:\n    - bundle exec fastlane buildRelease\n  environment:\n    name: production\n```\n\n\nTesting is really just another instance of the same thing, but instead of\ncalling one of the build lanes we call the test lane. Note that we are using\na `dependency` from the `buildDebug` job to ensure we don't need to rebuild\nanything.\n\n\n``` yaml\n\ntestDebug:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: test\n  dependencies:\n    - buildDebug\n  script:\n    - bundle exec fastlane test\n```\n\n\n### Publish\n\n\nNow that our code is being built, we're ready to publish to the Google Play\nStore. We only *publish* to the `internal` testing track and *promote* this\nsame build to the rest of the tracks.\n\n\nThis is achieved through the _fastlane_ integration, using a pre-built\naction to handle the job. In this case we are using a `dependency` on the\n`buildRelease` job, and creating a local copy of the Google API JSON keyfile\n(again stored in a [project-level\nvariable](https://docs.gitlab.com/ee/ci/variables/) instead of checking it\ninto source control.) 
We have this job (and all subsequent jobs) set to run\nonly on `manual` action so we have full human control/intervention from this\npoint forward. If you prefer to continuously deliver to your `internal`\ntrack you'd simply need to remove the `when: manual` entry and you'd have\nachieved your goal.\n\n\nIf you're like me, this may seem too easy to work. With everything we've\nconfigured in GitLab and _fastlane_ to this point, it's really this simple!\n\n\n``` yaml\n\npublishInternal:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: internal\n  dependencies:\n    - buildRelease\n  when: manual\n  before_script:\n    - echo $google_play_service_account_api_key_json > ~/google_play_api_key.json\n  after_script:\n    - rm ~/google_play_api_key.json\n  script:\n    - bundle exec fastlane internal\n```\n\n\n### Promote\n\n\nAs indicated earlier, promotion through alpha, beta, and production are all\n`manual` jobs. If internal testing is good, it can be promoted one step\nforward in sequence all the way through to production using these manual\njobs.\n\n\nIf you're with me to this point, there's really nothing new here and this\nreally highlights the power of GitLab with _fastlane_. 
We have a\n`.promote_job` template job which creates the local Google API JSON key file\nand the promote jobs themselves are basically identical.\n\n\n``` yaml\n\n.promote_job:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  when: manual\n  dependencies: []\n  only:\n    - master\n  before_script:\n    - echo $google_play_service_account_api_key_json > ~/google_play_api_key.json\n  after_script:\n    - rm ~/google_play_api_key.json\n\npromoteAlpha:\n  extends: .promote_job\n  stage: alpha\n  script:\n    - bundle exec fastlane promote_internal_to_alpha\n\npromoteBeta:\n  extends: .promote_job\n  stage: beta\n  script:\n    - bundle exec fastlane promote_alpha_to_beta\n\npromoteProduction:\n  extends: .promote_job\n  stage: production\n  script:\n    - bundle exec fastlane promote_beta_to_production\n```\n\n\nNote that we're `only` allowing production promotion from the `master`\nbranch, instead of from any branch. This is to ensure that the production\nbuild uses the separate set of `production` environment variables which only\nhappens for the `buildRelease` job. 
We also have these [variables set as\nprotected](https://docs.gitlab.com/ee/ci/variables/#protected-variables) so\nwe can enforce that they are only used on the `master` branch which is\nprotected.\n\n\n### Variables\n\n\nThe last step is to make sure you set up the [project-level\nvariables](https://docs.gitlab.com/ee/ci/variables/) we used throughout the\nconfiguration above:\n\n - `google_play_service_account_api_key_json`: see [https://docs.fastlane.tools/getting-started/android/setup/#collect-your-google-credentials](https://docs.fastlane.tools/getting-started/android/setup/#collect-your-google-credentials)\n - `oauth_client_id`\n - `oauth_client_id`, protected, `production` environment\n - `oauth_client_secret`\n - `oauth_client_secret`, protected, `production` environment\n - `oauth_redirect_uri`\n - `oauth_redirect_uri`, protected, `production` environment\n - `signing_jks_file_hex`: `xxd -p gitter-android-app.jks`\n - `signing_key_alias`\n - `signing_key_password`\n - `signing_keystore_password`\n\nIf you are using the same [`create-changlog-mr.sh`\nscript](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/ci-scripts/create-changlog-mr.sh)\nas us,\n\n - `deploy_key_android_repo`: see [https://docs.gitlab.com/ee/user/project/deploy_tokens/](https://docs.gitlab.com/ee/user/project/deploy_tokens/)\n - `gitlab_api_access_token`: see [https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) (we use a bot user)\n\n![Project variables for Gitter for\nAndroid](https://about.gitlab.com/images/blogimages/android-fastlane-variables.png){:\n.shadow.medium.center}\n\n\n## What's next\n\n\nUsing this configuration we've got Gitter for Android building, signing,\ndeploying to our internal track, and publishing to production as frequently\nas we like. 
Next up will be to do the same for iOS, so watch this space for\nour next post!\n\n\nPhoto by [Patrick Tomasso](https://unsplash.com/@impatrickt) on\n[Unsplash](https://unsplash.com/photos/KGcLJwIYiac)\n\n{: .note}\n","engineering",[108,232,9,697],"features",{"slug":699,"featured":6,"template":700},"android-publishing-with-gitlab-and-fastlane","BlogPost","content:en-us:blog:android-publishing-with-gitlab-and-fastlane.yml","Android Publishing With Gitlab And Fastlane","en-us/blog/android-publishing-with-gitlab-and-fastlane.yml","en-us/blog/android-publishing-with-gitlab-and-fastlane",{"_path":706,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":707,"content":713,"config":728,"_id":730,"_type":13,"title":731,"_source":15,"_file":732,"_stem":733,"_extension":18},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":708,"description":709,"ogTitle":708,"ogDescription":709,"noIndex":6,"ogImage":710,"ogUrl":711,"ogSiteName":686,"ogType":687,"canonicalUrls":711,"schema":712},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance Feick\"},{\"@type\":\"Person\",\"name\":\"Sam Wiskow\"}],\n        \"datePublished\": \"2024-09-12\",\n      
}",{"title":708,"description":709,"authors":714,"heroImage":710,"date":717,"body":718,"category":695,"tags":719},[715,716],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code\nSuggestions](https://about.gitlab.com/solutions/code-suggestions/) need\nlow-latency response times for a frictionless developer experience. Users\ndon’t want to interrupt their flow and wait for a code suggestion to show\nup. To ensure GitLab Duo can provide the right suggestion at the right time\nand meet high performance standards for critical AI infrastructure, GitLab\nrecently launched our first multi-region service to deliver AI features.\n\n\nIn this article, we will cover the benefits of multi-region services, how we\nbuilt an internal platform codenamed ‘Runway’ for provisioning and deploying\nmulti-region services using GitLab features, and the lessons learned\nmigrating to multi-region in production.\n\n\n## Background on the project\n\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning,\ndeploying, and operating containerized services. Runway's purpose is to\nenable GitLab service owners to self-serve infrastructure needs with\nproduction readiness out of the box, so application developers can focus on\nproviding value to customers. 
As part of [our corporate value of\ndogfooding](https://handbook.gitlab.com/handbook/values/#results), the first\niteration was built in 2023 by the Infrastructure department on top of core\nGitLab capabilities, such as continuous integration/continuous delivery\n([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and\ndeployments.\n\n\nBy establishing automated GitOps best practices, Runway services use\ninfrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\n\nGitLab Duo is primarily powered by [AI\nGateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist),\na satellite service written in Python outside of GitLab’s modular monolith\nwritten in Ruby. In cloud computing, a region is a geographical location of\ndata centers operated by cloud providers.\n\n\n## Defining a multi-region strategy\n\n\nDeploying in a single region is a good starting point for most services, but\ncan come with downsides when you are trying to reach a global audience.\nUsers who are geographically far from where your service is deployed may\nexperience different levels of service and responsiveness than those who are\ncloser. This can lead to a poor user experience, even if your service is\nwell built in all other respects.\n\n\nFor AI Gateway, it was important to meet global customers wherever they are\nlocated, whether on GitLab.com or self-managed instances using Cloud\nConnector. When a developer is deciding to accept or reject a code\nsuggestion, milliseconds matter and can define the user experience.\n\n\n### Goals\n\n\nMulti-region deployments require more infrastructure complexity, but for use\ncases where latency is a core component of the user experience, the benefits\noften outweigh the downsides. First, multi-region deployments offer\nincreased responsiveness to the user. By serving requests from locations\nclosest to end users, latency can be significantly reduced. 
Second,\nmulti-region deployments provide greater availability. With fault tolerance,\nservices can fail over during a regional outage. There is a much lower\nchance of a service failing completely, meaning users should not be\ninterrupted even in partial failures.\n\n\nBased on our goals for performance and availability, we used this\nopportunity to create a scalable multi-region strategy in Runway, which is\nbuilt leveraging GitLab features.\n\n\n### Architecture\n\n\nIn SaaS platforms, GitLab.com’s infrastructure is hosted on Google Cloud\nPlatform (GCP). As a result, Runway’s first supported platform runtime is\nCloud Run. The initial workloads deployed on Runway are stateless satellite\nservices (e.g., AI Gateway), so Cloud Run services are a good fit that\nprovide a clear migration path to more complex and flexible platform\nruntimes, e.g. Kubernetes.\n\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to\niterate and tease out the right level of abstractions for service owners as\npart of a platform play in the Infrastructure department.\n\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region\ndeployment strategy must support global load balancing, and the provisioning\nand configuration of regional resources. Here’s a simplified diagram of the\nproposed architecture in GCP:\n\n\n![simplified diagram of the proposed architecture in\nGCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\n\nBy replicating Cloud Run services across multiple regions and configuring\nthe existing global load balancing with serverless network endpoint group\n(NEG) backends, we’re able to serve traffic from multiple regions. 
For the\nremainder of the article, we’ll focus less on specifics of Cloud Run and\nmore on how we’re building with GitLab.\n\n\n## Building a multi-region platform with GitLab\n\n\nNow that you have context about Runway, let's walk through how to build a\nmulti-region platform using GitLab features.\n\n\n### Provision\n\n\nWhen building an internal platform, the first challenge is provisioning\ninfrastructure for a service. In Runway, Provisioner is the component that\nis responsible for maintaining a service inventory and managing IaC for GCP\nresources using Terraform.\n\n\nTo provision a service, an application developer will open an MR to add a\nservice project to the inventory using git, and Provisioner will create\nrequired resources, such as service accounts and identity and access\nmanagement policies. When building this functionality with GitLab, Runway\nleverages [OpenID Connect (OIDC) with GPC Workload Identity\nFederation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/)\nfor managing IaC.\n\n\nAdditionally, Provisioner will create a deployment project for each service\nproject. The purpose of creating separate projects for deployments is to\nensure the [principle of least\nprivilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/)\nby authenticating as a GCP service account with restricted permissions.\nRunway leverages the [Projects\nAPI](https://docs.gitlab.com/ee/api/projects.html) for creating projects\nwith [Terraform\nprovider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\n\nFinally, Provisioner defines variables in the deployment project for the\nservice account, so that deployment CI jobs can authenticate to GCP. 
Runway\nleverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and\n[Job Token\nallowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist)\nto handle authentication and authorization.\n\n\nHere’s a simplified example of provisioning a multi-region service in the\nservice inventory:\n\n\n```\n\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n\n```\n\n\nOnce provisioned, a deployment project and necessary infrastructure will be\ncreated for a service.\n\n\n### Configure\n\n\nAfter a service is provisioned, the next challenge is the configuration for\na service. In Runway,\n[Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl)\nis a component that is responsible for configuring and deploying services by\naligning the actual state with the desired state using Golang and Terraform.\n\n\nHere’s a simplified example of an application developer configuring GitLab\nCI/CD in their service project:\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n\n```\n\n\nRunway provides sane default values for configuration that are based on our\nexperience in delivering stable and reliable features to customers.\nAdditionally, service owners can configure infrastructure using a service\nmanifest file hosted in a service project. The service manifest uses JSON\nSchema for validation. 
When building this functionality with GitLab, Runway\nleverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema\ndocumentation.\n\n\nTo deliver this part of the platform, Runway leverages [CI/CD\ntemplates](https://docs.gitlab.com/ee/development/cicd/templates.html),\n[Releases](https://docs.gitlab.com/ee/user/project/releases/), and\n[Container\nRegistry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for\nintegrating with service projects.\n\n\nHere’s a simplified example of a service manifest:\n\n\n```\n\n# .runway/runway-production.yml\n\napiVersion: runway/v1\n\nkind: RunwayService\n\nspec:\n container_port: 8181\n regions:\n   - us-east1\n   - us-west1\n   - europe-west1\n\n# omitted for brevity\n\n```\n\n\nFor multi-region services, Runway injects an environment variable into the\ncontainer instance runtime, e.g. RUNWAY\\_REGION, so application developers\nhave the context to make any downstream dependencies regionally-aware, e.g.\nVertex AI API.\n\n\nOnce configured, a service project will be integrated with a deployment\nproject.\n\n\n### Deploy\n\n\nAfter a service project is configured, the next challenge is deploying a\nservice. In Runway, Reconciler handles this by triggering a deployment job\nin the deployment project when an MR is merged to the main branch. When\nbuilding this functionality with GitLab, Runway leverages [Trigger\nPipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project\nPipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines)\nto trigger jobs from service project to deployment project.\n\n\n![trigger jobs from service project to deployment\nproject](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\n\nOnce a pipeline is running in a deployment project, it will be deployed to\nan environment. 
By default, Runway will provision staging and production\nenvironments for all services. At this point, Reconciler will apply any\nTerraform resource changes for infrastructure. When building this\nfunctionality with GitLab, Runway leverages\n[Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and\n[GitLab-managed Terraform\nstate](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html)\nfor each service.\n\n\n![Reconciler applies any Terraform resource changes for\ninfrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\n\nRunway provides default application metrics for services. Additionally,\ncustom metrics can be used by enabling a sidecar container with\nOpenTelemetry Collector configured to scrape Prometheus and remote write to\nMimir. By providing observability out of the box, Runway is able to bake\nmonitoring into CI/CD pipelines.\n\n\nExample scenarios include gradual rollouts for blue/green deployments,\npreventing promotions to production when staging is broken, or automatically\nrolling back to previous revision when elevated error rates occur in\nproduction.\n\n\n![Runway bakes monitoring into CI/CD\npipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\n\nOnce deployed, environments will serve the latest revision of a service. At\nthis point, you should have a good understanding of some of the challenges\nthat will be encountered, and how to solve them with GitLab features.\n\n\n## Migrating to multi-region in production\n\n\nAfter extending Runway components to support multi-region in Cloud Run, the\nfinal challenge was migrating from AI Gateway’s single-region deployment in\nproduction with zero downtime. 
Today, teams using Runway to deploy their\nservices can self-serve on regions making a multi-region deployment just as\nsimple as a single-region deployment. \n\n\nWe were able to iterate on building multi-region functionality without\nimpacting existing infrastructure by using semantic versioning for Runway.\nNext, we’ll share some learnings from the migration that may inform how to\noperate services for an internal multi-region platform.\n\n\n### Dry run deployments\n\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off\nis that plans cannot be verified in advance, which could risk inadvertently\ndestroying or misconfiguring production infrastructure. To solve this\nproblem, Runway will perform a “dry run” deployment for MRs.\n\n\n![\"Dry run\"\ndeployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\n\nFor migrating AI Gateway, dry run deployments increased confidence and\nhelped mitigate risk of downtime during rollout. When building an internal\nplatform with GitLab, we recommend supporting dry run deployments from the\nstart.\n\n\n### Regional observability\n\n\nIn Runway, existing observability was aggregated by assuming a single-region\ndeployment. To solve this problem, Runway observability was retrofitted to\ninclude a new region label for Prometheus metrics.\n\n\nOnce metrics were retrofitted, we were able to introduce service level\nindicators (SLIs) for both regional Cloud Run services and global load\nbalancing. 
Here’s an example dashboard screenshot for a general Runway\nservice:\n\n\n![dashboard screenshot for a general Runway\nservice](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nAdditionally, we were able to update our service level objectives (SLOs) to\nsupport regions. As a result, service owners could be alerted when a\nspecific region experiences an elevated error rate, or increase in response\ntimes.\n\n\n![screenshot of\nalerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nFor migrating AI Gateway, regional observability increased confidence and\nhelped provide more visibility into new infrastructure. When building an\ninternal platform with GitLab, we recommend supporting regional\nobservability from the start.\n\n\n### Self-service regions\n\n\nThe Infrastructure department successfully performed the initial migration\nof multi-region support for AI Gateway in production with zero downtime.\nGiven the risk associated with rolling out a large infrastructure migration,\nit was important to ensure the service continued working as expected.\n\n\nShortly afterwards, service owners began self-serving additional regions to\nmeet the growth of customers. At the time of writing, [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) is available in six regions\naround the globe and counting. Service owners are able to configure the\ndesired regions, and Runway will provide guardrails along the way in a\nscalable solution.\n\n\nAdditionally, three other internal services have already started using\nmulti-region functionality on Runway. 
Application developers have entirely\nself-served functionality, which validates that we’ve provided a good\nplatform experience for service owners. For a platform play, a scalable\nsolution like Runway is considered a good outcome since the Infrastructure\ndepartment is no longer a blocker.\n\n\n## What’s next for Runway\n\n\nBased on how quickly we could iterate to provide results for customers, the\nSaaS Platforms department has continued to invest in Runway. We’ve grown the\nRunway team with additional contributors, started evolving the platform\nruntime (e.g. Google Kubernetes Engine), and continue dogfooding with\ntighter integration in the product.\n\n\nIf you’re interested in learning more, feel free to check out\n[https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n\n## More Building GitLab with GitLab\n\n- [Why there is no MLOps without\nDevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n- [Stress-testing Product\nAnalytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n\n- [Web API Fuzz\nTesting](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n\n- [How GitLab.com inspired\nDedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n\n- [Expanding our security certification\nportfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n",[108,720,721,722,723,724,9,725,726,727],"CD","CI","inside GitLab","tutorial","performance","git","DevSecOps","AI/ML",{"slug":729,"featured":90,"template":700},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai 
Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":735,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":736,"content":742,"config":750,"_id":752,"_type":13,"title":753,"_source":15,"_file":754,"_stem":755,"_extension":18},"/en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"title":737,"description":738,"ogTitle":737,"ogDescription":738,"noIndex":6,"ogImage":739,"ogUrl":740,"ogSiteName":686,"ogType":687,"canonicalUrls":740,"schema":741},"Deploy a NodeJS Express app with GitLab's Cloud Run integration","This tutorial will show you how to use NodeJS and Express to deploy an application to Google Cloud. This step-by-step guide will have you up and running in less than 10 minutes with the Cloud Run integration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097892/Blog/Hero%20Images/Blog/Hero%20Images/speedlights_speedlights.png_1750097891963.png","https://about.gitlab.com/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deploy a NodeJS Express app with GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2025-01-13\",\n      }",{"title":737,"description":738,"authors":743,"heroImage":739,"date":746,"body":747,"category":695,"tags":748},[744,745],"Sarah Matthies","Noah Ing","2025-01-13","Are you looking to deploy your NodeJS app to Google Cloud with the least\nmaintenance possible? 
This tutorial will show you how to utilize GitLab’s\nGoogle Cloud integration to deploy your NodeJS app in less than 10 minutes.\n\n\nTraditionally, deploying an application often requires assistance from\nproduction or DevOps engineers. This integration now empowers developers to\nhandle deployments independently. Whether you’re a solo developer or part of\na large team, this setup gives everyone the ability to deploy their\napplications efficiently.\n\n\n## Overview\n\n\n- Create a new project in GitLab\n\n- Set up your NodeJS application\n\n- Use the Google Cloud integration to create a Service account\n\n- Use the Google Cloud integration to configure Cloud Run via Merge Request\n\n- Enjoy your newly deployed NodeJS app\n\n- Follow the cleanup guide\n\n\n## Prerequisites\n\n- Owner access on a Google Cloud Platform project\n\n- Working knowledge of JavaScript/TypeScript (not playing favorites here!)\n\n- Working knowledge of GitLab CI\n\n- 10 minutes \n\n\n## Step-by-step guide\n\n\n### 1. Create a new project in GitLab\n\n\nWe decided to call our project `nodejs–express-cloud-run` for simplicity.\n\n\n![Create a new\nproject](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097905106.png)\n\n\n### 2. Upload your NodeJS app or use this example to get started.\n\n\n[Demo](https://gitlab.com/demos/templates/nodejs-cloud-run)\n\n\n**Note:** Make sure to include the `cloud-run` [CI\ntemplate](https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml)\nwithin your project.\n\n\n![cloud-run CI template\ninclude](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097905107.png)\n\n\n### 3. 
Use the Google Cloud integration to create a Service account.\n\n\nNavigate to __Operate > Google Cloud > Create Service account__.\n\n\n![Create Service account\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097905109.png)\n\n\nAlso configure the region you would like the Cloud Run instance deployed to.\n\n\n![Cloud Run instance deployment region\nselection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097905113.png)\n\n\n### 4. Go to the Deployments tab and use the Google Cloud integration to\nconfigure __Cloud Run via Merge Request__.\n\n\n![Deployments - Configuration of Cloud Run via Merge\nRequest](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097905115.png)\n\n\nThis will open a merge request – immediately merge it.\n\n\n![Merge request for\ndeployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097905117.png)\n\n\n__Note:__ `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and\n`GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the\nprevious steps.\n\n\n![Variables\nlisting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097905118.png)\n\n\n### 5. Voila! 
Check your pipeline and you will see you have successfully\ndeployed to Google Cloud Run using GitLab CI.\n\n\n![Successful deployment to Google Cloud\nRun](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097905119.png)\n\n\nClick the Service URL to view your newly deployed Node server.\n\n\n![View newly deployed Node\nserver](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097905120.png)\n\n\nIn addition, you can navigate to __Operate > Environments__ to see a list of\ndeployments for your environments.\n\n\n![Environments view of deployment\nlist](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097905121.png)\n\n\nBy clicking on the environment called `main`, you’ll be able to view a\ncomplete list of deployments specific to that environment.\n\n\n![Main view of deployments to specific\nenvironment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097905122.png)\n\n\n### 6. Next steps\n\n\nTo get started with developing your Node application, try adding another\nendpoint. For instance, in your `index.js` file, you can add a **/bye**\nendpoint as shown below:\n\n\n```\n\napp.get('/bye', (req, res) => {\n  res.send(`Have a great day! See you!`);\n});\n\n\n```\n\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy\nthe updates. 
Once it’s complete, go back to the Service URL and navigate to\nthe **/bye** endpoint to see the new functionality in action.\n\n\n![Bye\nmessage](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097905123.png)\n\n\n## Follow the cleanup guide\n\n\nTo prevent incurring charges on your Google Cloud account for the resources\nused in this tutorial, you can either delete the specific resources or\ndelete the entire Google Cloud project. For detailed instructions, refer to\nthe [cleanup guide\nhere](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n\n> Read more of these helpful [tutorials from GitLab solutions\narchitects](https://about.gitlab.com/blog/tags/solutions-architecture/).\n",[108,9,232,749,723],"solutions architecture",{"slug":751,"featured":90,"template":700},"deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","content:en-us:blog:deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","Deploy A Nodejs Express App With Gitlabs Cloud Run Integration","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"_path":757,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":758,"content":761,"config":769,"_id":771,"_type":13,"title":772,"_source":15,"_file":773,"_stem":774,"_extension":18},"/en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"noIndex":6,"title":759,"description":760},"Fast and secure AI agent deployment to Google Cloud with GitLab","Follow this step-by-step guide, complete with a demo application, to learn how to use agentic AI, along with GitLab's native integrations and CI/CD components.",{"title":759,"description":760,"authors":762,"heroImage":764,"date":765,"body":766,"category":767,"tags":768},[763],"Regnard 
Raquedan","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","2025-07-07","[Agentic AI](https://about.gitlab.com/topics/agentic-ai/) is transforming\nhow we build intelligent applications, but deploying AI agents securely and\nefficiently can be challenging. In this tutorial, you'll learn how to deploy\nan AI agent built with Google's Agent Development Kit\n([ADK](https://cloud.google.com/vertex-ai/generative-ai/docs/agent-development-kit/quickstart))\nto Cloud Run using [GitLab's native\nintegrations](https://cloud.google.com/blog/topics/partners/understand-the-google-cloud-gitlab-integration)\nand [CI/CD components](https://docs.gitlab.com/ci/components/).\n\n\n## What are AI agents and why do they matter?\n\n\nAgentic AI represents a significant evolution in artificial intelligence. Unlike traditional generative AI tools that require constant human direction, AI agents leverage advanced language models and natural language processing to take independent action. These systems can understand requests, make decisions, and execute multistep plans to achieve goals autonomously.\n\n\nThis tutorial uses Google's ADK, a flexible and modular framework for developing and deploying AI agents. While optimized for Gemini and the Google ecosystem, ADK is model-agnostic, deployment-agnostic, and built for compatibility with other frameworks.\n\n\n## Our demo application: Canada City Advisor\n\n\nTo demonstrate the deployment process, we'll work with a practical example: the Canada City Advisor. This AI agent helps users find their ideal Canadian city based on their preferences and constraints.\n\n\nHere's how it works:\n\n\n* Users input their budget requirements and lifestyle preferences.  \n\n* The root agent coordinates two sub-agents:  \n\n  * A budget analyzer agent that evaluates financial constraints. This draws data obtained from the Canada Mortgage and Housing Corporation.  
\n  * A lifestyle preferences agent that matches cities to user needs. This includes a weather service that uses [Open-Meteo](https://open-meteo.com/) to get the proper city information.  \n* The system generates personalized city recommendations\n\n\nThis multi-agent architecture showcases the power of agentic AI - different specialized agents working together to solve a complex problem. The sub-agents are only invoked when the root agent determines that budget and lifestyle analysis are needed.\n\n\n![Multi-agent architecture to develop demo application with agentic AI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1751576568/obgxpxvlnxtzifddrrz1.png)\n\n\n## Prerequisites\n\n\nBefore we begin, ensure you have:\n\n\n* A Google Cloud project with the following APIs enabled:  \n\n  * Cloud Run API  \n  * Artifact Registry API  \n  * Vertex AI API  \n* A GitLab project for your source code  \n\n* Appropriate permissions in both GitLab and Google Cloud\n\n\n**Step 1: Set up IAM integration with Workload Identity Federation**\n\n\nThe first step establishes secure, keyless authentication between GitLab and Google Cloud using [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation). This eliminates the need for service account keys and improves security.\n\n\nIn your GitLab project:\n\n\n1. Navigate to **Settings > Integrations > Google Cloud IAM.**  \n\n2. Provide the following information:  \n\n   * **Project ID**: Your Google Cloud project ID  \n   * **Project Number**: Found in your Google Cloud console  \n   * **Pool ID**: A unique identifier for your workload identity pool  \n   * **Provider ID**: A unique identifier for your identity provider\n\nGitLab will generate a script for you. 
Copy this script and run it in your Google Cloud Shell to create the Workload Identity Federation.\n\n\n**Step 2: Configure Google Artifact Registry integration**\n\n\nNext, we'll set up the connection to Google Artifact Registry where our container images will be stored.\n\n\n1. In GitLab, go to **Settings > Integrations > Google Artifact Registry.**  \n\n2. Enter:  \n\n   * **Google Cloud Project ID**: Same as in Step 1  \n   * **Repository Name**: Name of an existing Artifact Registry repository  \n   * **Location**: The region where your repository is located\n\n**Important**: The repository must already exist in Artifact Registry. GitLab won't create a new one for you in this context.\n\n\nGitLab will generate commands to set up the necessary permissions. Run these in Google Cloud Shell.\n\n\nAdditionally, add these roles to your service principal for Cloud Run deployment:\n\n\n* `roles/run.admin`  \n\n* `roles/iam.serviceAccountUser`  \n\n* `roles/cloudbuild.builds.editor`\n\n\nYou can add these roles using the following gcloud commands:\n\n\n```shell\n\nGCP_PROJECT_ID=\"\u003Cyour-project-id>\" #replace\n\nGCP_PROJECT_NUMBER=\"\u003Cyour-project-number>\" #replace\n\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003Cyour-pool-id>\" #replace\n\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  
--member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n\n**Step 3: Create the CI/CD pipeline**\n\n\nNow for the exciting part – let's build our deployment pipeline! GitLab's CI/CD components make this remarkably simple.\n\n\nCreate a `.gitlab-ci.yml` file in your project root:\n\n\n```unset\n\nstages:\n  - build\n  - test\n  - upload\n  - deploy\n\nvariables:\n  GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n  AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n  image: docker:24.0.5\n  stage: build\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -t $GITLAB_IMAGE .\n    - docker push $GITLAB_IMAGE\n\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n  - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n    inputs:\n      stage: upload\n      source: $GITLAB_IMAGE\n      target: $AR_IMAGE\n  - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n    inputs:\n      stage: deploy\n      project_id: \"\u003Cyour-project-id>\" #replace\n      service: \"canadian-city\"\n      region: \"us-central1\"\n      image: $AR_IMAGE\n```\n\n\nThe 
pipeline consists of four stages:\n\n\n1. **Build**: Creates the Docker container with your AI agent  \n\n2. **Test**: Runs security scans (container scanning, dependency scanning, SAST)  \n\n3. **Upload**: Pushes the container to Artifact Registry  \n\n4. **Deploy**: Deploys to Cloud Run\n\n\nThe great thing about using [GitLab's CI/CD components](https://docs.gitlab.com/ci/components/) is that you only need to provide a few parameters - the components handle all the complex authentication and deployment logic.\n\n\n**Step 4: Deploy and test**\n\n\nWith everything configured, it's time to deploy:\n\n\n1. Commit your code and `.gitlab-ci.yml` to your GitLab repository.  \n\n2. The pipeline will automatically trigger.  \n\n3. Monitor the pipeline progress in GitLab's CI/CD interface.  \n\n4. Once complete, find your Cloud Run URL in the Google Cloud Console.\n\n\nYou'll see each stage execute:\n\n\n* Build stage creates your container.  \n\n* Test stage runs comprehensive security scans.  \n\n* Upload stage pushes to Artifact Registry.  \n\n* Deploy stage creates or updates your Cloud Run service.\n\n\n## Security benefits\n\n\nThis approach provides several security advantages:\n\n\n* **No long-lived credentials:** Workload Identity Federation eliminates service account keys.  \n\n* **Automated security scanning:** Every deployment is scanned for vulnerabilities.  \n\n* **Audit trail:** Complete visibility of who deployed what and when.  \n\n* **Principle of least privilege:** Fine-grained IAM roles limit access.\n\n\n## Summary\n\nBy combining GitLab's security features with Google Cloud's powerful AI and serverless platforms, you can deploy AI agents that are both secure and scalable. The integration between GitLab and Google Cloud eliminates much of the complexity traditionally associated with such deployments.\n\n> Use this tutorial's [complete code\nexample](https://gitlab.com/gitlab-partners-public/google-cloud/demos/ai-agent-deployment)\nto get started now. 
Not a GitLab customer yet? Explore the DevSecOps platform with [a free trial](https://about.gitlab.com/free-trial/).\n","ai-ml",[727,9,723],{"featured":6,"template":700,"slug":770},"fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab","content:en-us:blog:fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","Fast And Secure Ai Agent Deployment To Google Cloud With Gitlab","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"_path":776,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":777,"content":783,"config":790,"_id":792,"_type":13,"title":793,"_source":15,"_file":794,"_stem":795,"_extension":18},"/en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"title":778,"description":779,"ogTitle":778,"ogDescription":779,"noIndex":6,"ogImage":780,"ogUrl":781,"ogSiteName":686,"ogType":687,"canonicalUrls":781,"schema":782},"Fast Python Flask server deployment with GitLab + Google Cloud","This tutorial shows how to use GitLab’s Google Cloud integration to deploy a Python Flask server in less than 10 minutes, helping developers become more independent and efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098427/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750098427691.png","https://about.gitlab.com/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fast Python Flask server deployment with GitLab + Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2024-11-04\",\n      
}",{"title":778,"description":779,"authors":784,"heroImage":780,"date":786,"body":787,"category":695,"tags":788},[745,785],"Jerez Solis","2024-11-04","Deploying an application to the cloud often requires assistance from\nproduction or DevOps engineers. GitLab's Google Cloud integration empowers\ndevelopers to handle deployments independently. In this tutorial, you'll\nlearn how to deploy a Python Flask server to Google Cloud in less than 10\nminutes. Whether you’re a solo developer or part of a large team, this setup\nallows you to deploy applications efficiently.\n\n\nYou'll learn how to:\n\n\n- Create a new project in GitLab\n\n- Create a Flask server utilizing `main.py`\n\n- Utilize the Google Cloud integration to create a Service account\n\n- Utilize the Google Cloud integration to create Cloud Run via a merge\nrequest\n\n- Access your newly deployed Flask server\n\n- Clean up your environment\n\n\n## Prerequisites:\n\n- Owner access on a Google Cloud Platform project\n\n- Working knowledge of Python\n\n- Working knowledge of GitLab CI\n\n- 10 minutes\n\n\n## Step-by-step Python Flask server deployment to Google Cloud\n\n\n**1. Create a new project in GitLab.**\n\n\nWe decided to call our project \"python-flask-cloud-run\" for simplicity.\n\n\n![python flask server - create a new project in\nGitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098438036.png)\n\n\n**2. 
Create a flask server utilizing main.py demo.**\n\n\nFind the `main.py` demo here:\n[https://gitlab.com/demos/applications/python-flask-cloud-run](https://gitlab.com/demos/applications/python-flask-cloud-run).\n\n\n```python\n\nimport os\n\n\nfrom flask import Flask\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\n\ndef hello_world():\n    \"\"\"Example Hello World route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Hello {name}!\"\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n```\n\n\n**3. Create a `requirements.txt` with the following dependencies.**\n\n\n```\n\nFlask==3.0.3\n\ngunicorn==22.0.0\n\nWerkzeug==3.0.3\n\n```\n\n\n**4. Utilizing the Google Cloud integration, create a Service account.**\n\n\nNavigate to **Operate > Google Cloud > Create Service account**.\n\n\n![python flask server - create service\naccount](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098438037.png)\n\n\n**5. Also configure the region you would like the Cloud Run instance to\ndeploy to.**\n\n\n![python flask server - configure the\nregion](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098438038.png)\n\n\n**6. Utilizing the Google Cloud integration, configure Cloud Run via merge\nrequest.**\n\n\n![python flask server -\ndeployments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098438041.png)\n\n\n**7. This will open a merge request. 
Immediately merge this merge request.**\n\n\n![python flask server - enable deployments to Cloud\nRun](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098438043.png)\n\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`,\n`GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the\nprevious steps.\n\n\n![python flask server -\nvariables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098438044.png)\n\n\n**8. Voila! Check your pipeline and you will see you have successfully\ndeployed to Google Cloud Run utilizing GitLab CI.**\n\n\n![python flask server - update\ndockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098438045.png)\n\n\n\u003Cbr>\u003C/br>\n\n\n![python flask server -\ndockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098438046.png)\n\n\n**9. Click the Service URL to view your newly deployed Flask server.**\n\n\nNavigate to **Operate > Environments** to see a list of deployments for your\nenvironments.\n\n\n![python flask server - deployments\nlist](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098438047.png)\n\n\nBy clicking on the environment called **main**, you’ll be able to view a\ncomplete list of deployments specific to that environment.\n\n\n![python flask server - main job\nlisting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098438048.png)\n\n\n## Next steps\n\n\nTo get started with developing your Flask application, try adding another\nendpoint. 
For instance, in your `main.py` file, you can add a **/bye**\nendpoint as shown below:\n\n\n```\n\n@app.route(\"/bye\")\n\ndef bye():\n    \"\"\"Example Bye route.\"\"\"\n    return \"Have a great day! See you!\"\n\n```\n\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy\nthe updates. Once it’s complete, go back to the Service URL and navigate to\nthe **/bye** endpoint to see the new functionality in action.\n\n\n## Clean up\n\n\nTo prevent incurring charges on your Google Cloud account for the resources\nused in this tutorial, you can either delete the specific resources or\ndelete the entire Google Cloud project. For detailed instructions, refer to\nthe [cleanup\nguide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n\n> For more DevSecOps capabilities, [start a free trial of GitLab\nUltimate and GitLab\nDuo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com/blog/%2F).\n",[723,789,9,749],"cloud native",{"slug":791,"featured":90,"template":700},"fast-python-flask-server-deployment-with-gitlab-google-cloud","content:en-us:blog:fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","Fast Python Flask Server Deployment With Gitlab Google Cloud","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"_path":797,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":798,"content":804,"config":812,"_id":814,"_type":13,"title":815,"_source":15,"_file":816,"_stem":817,"_extension":18},"/en-us/blog/gcp-move-update",{"title":799,"description":800,"ogTitle":799,"ogDescription":800,"noIndex":6,"ogImage":801,"ogUrl":802,"ogSiteName":686,"ogType":687,"canonicalUrls":802,"schema":803},"Update on our planned move from Azure to Google Cloud Platform","GitLab.com is migrating to Google Cloud 
Platform August 11 – here’s what this means for you now and in the future.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671280/Blog/Hero%20Images/gitlab-gke-integration-cover.png","https://about.gitlab.com/blog/gcp-move-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update on our planned move from Azure to Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2018-07-19\",\n      }",{"title":799,"description":800,"authors":805,"heroImage":801,"date":807,"body":808,"category":695,"tags":809},[806],"David Smith","2018-07-19","\n\nNOTE to users in Crimea, Cuba, Iran, North Korea, Sudan, and Syria: GitLab.com may\nnot be accessible after the migration to Google. Google has informed us that\nthere are legal restrictions that are imposed for those countries. See this\n[U.S. Department of the Treasury link](http://www.treasury.gov/resource-center/sanctions/Programs/Pages/Programs.aspx)\nfor more details. At this time, we can only recommend that you download\nyour code or export relevant projects as a backup. See [this issue](https://gitlab.com/gitlab-com/migration/issues/649)\nfor more discussion.\n{: .alert .alert-warning}\n\nUpdate as of August 1: There will be a short maintenance window on Saturday, August 4 at 13:00 UTC. We will perform a test of approximately 1 hour.  This will help us verify some of our fixes to make sure the switchover goes as planned.\n{: .alert .alert-info}\n\nUpdate as of July 27: There will be a short maintenance window on Saturday, July 28 at 13:00 UTC. We will perform a short test of approximately 5 minutes.  
This will help us verify some of our fixes to make sure our Chef runs work correctly with GitLab.com inaccessible.\n{: .alert .alert-info}\n\nUpdate as of July 24: Following our dry run of the migration on Saturday, July 21, we have rescheduled the migration with a new target date of Saturday, August 11. You can read through [our findings document](https://docs.google.com/document/d/1Y7Cv4BHmHw8djtDBex8opUGs8t0wWmgrueaCocKfYxs/edit?usp=sharing) for all the details.\n{: .alert .alert-info}\n\nImproving the performance and reliability of [GitLab.com](/pricing/)  has been a top priority for us. On this front we've made some incremental gains while we've been planning for a large change with the potential to net significant results: running GitLab as a [cloud native](/topics/cloud-native/) application on Kubernetes.\n\nThe next incremental step on our cloud native journey is a big one: migrating from Azure to Google Cloud Platform (GCP). While Azure has been a great provider for us, GCP has the best Kubernetes support and we believe will be the best provider for our long-term plans. In the short term, our users will see some immediate benefits once we cut over from Azure to GCP including encrypted data at rest by default and faster caching due to GCP's tight integration with our existing CDN.\n\n## Upcoming maintenance windows for the GCP migration\n\nAs an update to [our earlier blog post on the migration](/blog/moving-to-gcp/), this is a short post to let our community know we are planning on performing the migration of GitLab.com the weekend of ~~July 28~~ August 11 (this has been rescheduled following our dry run on July 21). 
We have a maintenance window coming up that we would like to make sure everybody knows about.\n\n### What you need to know:\n\nDuring the maintenance windows, the following services will be unavailable:\n\n* SaaS website ([GitLab.com](https://gitlab.com/) will be offline, but [about.gitlab.com](https://about.gitlab.com/) and [docs.gitlab.com](https://docs.gitlab.com/) will still be available)\n* Git ssh\n* Git https\n* registry\n* CI/CD\n* Pages\n\n### Maintenance window - Dry run - Saturday, July 21 at 13:00 UTC\n\nAs a further update to our testing, we are planning to take a short maintenance window this weekend on Saturday, July 21 at 13:00 UTC to do final readiness checks.\nThis maintenance window should last one hour.\n\n2018-07-23 UPDATE: Here are the [findings from the maintenance window](https://docs.google.com/document/d/1Y7Cv4BHmHw8djtDBex8opUGs8t0wWmgrueaCocKfYxs/edit). We've decided to push our target date from July 28th to August 11th to comfortably address several issues. We will likely do a small maintenance window on Saturday, July 28th, and another full practice on Saturday, August 4th.\n\n### Maintenance window - Short test - Saturday, July 28 at 13:00 UTC\n\nWe will perform a short test of approximately 5 minutes.  This will help us verify some of our fixes to make sure our Chef runs work correctly with GitLab.com inaccessible.\n\n\n### Maintenance window - Dry run - Saturday, August 4 at 13:00 UTC\n\nWe will repeat the dry run exercise again to have a chance to verify our changes to the switchover plan.\n\n\n### Maintenance window - Actual switchover - Saturday, ~~July 28~~ August 11 at 10:00 UTC\n\nOn the day of the migration, we are planning to start at 10:00 UTC.  The time window for GitLab.com to be in maintenance is currently planned to be two hours.  Should any of these times change, we will be updating on the channels listed below. 
When this window is completed GitLab.com will be running out of GCP.\n\n* [GitLab Status page](https://status.gitlab.com/)\n* [GitLab Status Twitter](https://twitter.com/gitlabstatus)\n\n### GitLab Pages and custom domains\n\nIf you have a custom domain on [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/):\n\n* We will have a proxy in place so you do not have to change your DNS immediately.\n* GitLab Pages will ultimately go to 35.185.44.232 after the July 28 migration.\n* Do not change your DNS to this new address until we have successfully completed the migration.\n* We will post an update to our blog about when the cutoff will be for changing DNS from our Azure address to GCP for GitLab Pages.\n\nShould you need support during the migration, please reach out to [GitLab Support](https://about.gitlab.com/support/).\n\nWish us luck!\n",[9,789,810,811],"GKE","kubernetes",{"slug":813,"featured":6,"template":700},"gcp-move-update","content:en-us:blog:gcp-move-update.yml","Gcp Move Update","en-us/blog/gcp-move-update.yml","en-us/blog/gcp-move-update",{"_path":819,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":820,"content":826,"config":833,"_id":835,"_type":13,"title":836,"_source":15,"_file":837,"_stem":838,"_extension":18},"/en-us/blog/geo-is-available-on-staging-for-gitlab-com",{"title":821,"description":822,"ogTitle":821,"ogDescription":822,"noIndex":6,"ogImage":823,"ogUrl":824,"ogSiteName":686,"ogType":687,"canonicalUrls":824,"schema":825},"Why we enabled Geo on the staging environment for GitLab.com","Geo is GitLab's solution for distributed teams and now we can validate and test it at scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669656/Blog/Hero%20Images/donald-giannatti-4qk3nQI3WHY-unsplash-small.jpg","https://about.gitlab.com/blog/geo-is-available-on-staging-for-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"Why we enabled Geo on the staging environment for GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabian Zimmer\"},{\"@type\":\"Person\",\"name\":\"Douglas Alexandre\"}],\n        \"datePublished\": \"2020-04-16\",\n      }",{"title":821,"description":822,"authors":827,"heroImage":823,"date":830,"body":831,"category":695,"tags":832},[828,829],"Fabian Zimmer","Douglas Alexandre","2020-04-16","We're testing Geo at scale on GitLab.com – our largest installation of\nGitLab – because we believe the best way to guarantee that Geo works as\nexpected is to [use it\nourselves](/handbook/product/product-processes/#dogfood-everything).\n\n\nGeo is GitLab's [solution for distributed teams](https://docs.gitlab.com/ee/administration/geo/index.html). We want\nteams all over the world to have a great user experience - independent of\nhow far away users are from their primary GitLab installation. To accomplish\nthis goal, read-only Geo nodes can be created across the world in close\ngeographical proximity to your teams. These Geo nodes replicate important\ndata, such as projects or LFS files, from the primary GitLab instance and\nthereby make the data available to users. Geo can also be used as part of a\ndisaster recovery strategy because it adds data redundancy. Geo nodes follow\nthe primary installation closely and allow customers to failover to this\nnode in case the primary node becomes unavailable.\n\n\nMany of GitLab's customers use Geo on self-managed installations that serve\nhundreds to thousands of users. Geo is a critical component of GitLab\ninstallations and our customers expect Geo to work at any scale. 
We are\ntesting Geo at scale on our GitLab.com installation because if it works for\nus, chances are it will work for our worldwide group of users too.\n\n\nIn this blog post, we'll explain why and how we chose to enable GitLab Geo\non our pre-production environment (from now on referred to as \"staging\"),\nthe challenges we encountered, some of the immediate benefits to our\ncustomers, and what will be next.\n\n\n## Why do we need to use Geo at GitLab?\n\nIn order to build the best product possible, we believe it is imperative to\n[use GitLab\nourselves](/handbook/product/product-processes/#dogfood-everything). Many of\nour Geo customers have thousands of users actively using GitLab and a major\nchallenge for the team was to test and validate new Geo functionality at\nscale. Enabling Geo on the GitLab.com staging environment makes this task a\nlot easier.\n\n\nWe also used Geo to [migrate GitLab.com from Microsoft Azure to Google Cloud\nin 2018](/blog/moving-to-gcp/), which allowed us to improve the product by\nidentifying bottlenecks. In the last two years, GitLab has grown\ndramatically and in order to push Geo forward, we need to enable it (again).\n\n\n### Test Geo at scale\n\nWhen the team decides to add new functionalities to Geo, for example\n[package repository\nreplication](https://gitlab.com/groups/gitlab-org/-/epics/2346), we had to\nensure that the feature's performance is as expected. Having Geo available\non staging allows us to deploy these changes behind a feature flag first and\nevaluate the performance before shipping the feature to customers. This is\nespecially relevant to some of Geo's PostgreSQL database queries. 
On a small\ntest deployment, things may look fine, but at scale these queries can time\nout, resulting in replication issues.\n\n\nWe also deploy code to our staging environment twice a week, which means\nthat any regressions surface before a new packaged release.\n\n\n### Prove that Geo can be deployed as part of our production infrastructure\n\nA large amount of automation is required to run GitLab.com with millions of\nusers, and our SRE team is constantly improving how we run GitLab.com. The\nfirst step bringing Geo into our production environment is to deploy Geo as\na part of our staging environment. Without the right monitoring, runbooks,\nand processes in place, it would not be possible to move Geo into production\nwhere it could be used to enable geo-replication and/or as part of our\ndisaster recovery strategy.\n\n\n## Setting up Geo on staging\n\n\nSetting up Geo on staging had some unique challenges, you can get a detailed\noverview in our [Geo on staging\ndocumentation](/handbook/engineering/development/enablement/systems/geo/staging.html).\n\n\nIn order to deploy Geo, we opted for a minimally viable approach that is\nsufficient for a first iteration. Geo is currently deployed as a single\nall-in-one box, not yet as a [Geo high-availability\nconfiguration](https://docs.gitlab.com/ee/administration/geo/replication/multiple_servers.html).\nGeo deploys happen automatically via Chef, similar to any other part of the\ninfrastructure.\n\n\n![Geo staging\nDiagram](https://about.gitlab.com/images/blogimages/geo-on-staging/geo_staging_diagram.png){:\n.shadow.medium.center}\n\n\nWe currently replicate only a subset of data using [Geo's selective\nsynchronization\nfeature](https://docs.gitlab.com/ee/administration/geo/replication/configuration.html#selective-synchronization),\nwhich also allows us to dogfood this feature. Selective synchronization uses\na number of complex database queries and this helps us validate those at\nscale. 
We chose to replicate the `gitlab-org` group, which contains mostly\nof GitLab's projects (including\n[GitLab](https://gitlab.com/gitlab-org/gitlab) itself).\n\n\nWe also needed to configure Geo to use the same logical [Gitaly\nshards](https://docs.gitlab.com/ee/administration/repository_storage_paths.html)\non the secondary compared to the primary node. We'll [improve our Geo\ndocumentation](https://gitlab.com/gitlab-org/gitlab/-/issues/213840) to\nensure it is clear when this is required.\n\n\nA logical Gitaly shard is an entry in the GitLab configuration file that\npoints to a path on the file system and a Gitaly address:\n\n\n```\n\n\"git_data_dirs\": {\n  \"default\": {\n    \"path\": \"/var/opt/gitlab/git-data-file01\",\n    \"gitaly_address\": \"unix:/var/opt/gitlab/gitaly/gitaly.socket\"\n  }\n}\n\n```\n\n\nIn the example above, we have only one logical shard identified by the key\n`default`, but we could have as many as needed.\n\nEvery project on GitLab is associated with a logical Gitaly shard, which\nmeans that we know where all relevant data (repositories, uploads, etc.) is\nstored. A project `example` that is associated with the logical Gitaly shard\n`default`, would therefore be stored at `/var/opt/gitlab/git-data-file01`\nand the Gitaly server would be available at\n`/var/opt/gitlab/git-data-file01`.\n\n\nThis information is stored in the PostgreSQL database and in order for Geo\nto replicate projects successfully we needed to create the same Gitaly shard\nlayout. On the Geo secondary node, we are using only one physical shard to\nstore the data for all projects. 
To allow it to replicate any project from\nthe primary node, we had to point all the logical Gitaly shards to the same\nphysical shard on the secondary node.\n\n\nGeo on staging is configured to use [cascading streaming\nreplication](https://www.postgresql.org/docs/current/warm-standby.html#CASCADING-REPLICATION),\nwhich allows one standby node in the staging [Patroni\ncluster](https://github.com/zalando/patroni) to act as relay and stream\nwrite-ahead logs (WAL) to the Geo secondary. This setup also has the\nadvantage that Geo can't put an additional load onto the primary database\nnode and we are also not using physical replication slots to further reduce\nthe load. [Patroni will likely be supported in Omnibus\npackages](https://gitlab.com/groups/gitlab-org/-/epics/2588) and we will\nreview these settings to allow our customers to benefit from this setup.\n\n\nPostgreSQL will automatically fall back on its `restore_command` to pull\narchived WAL segments using [wal-e](https://github.com/wal-e/wal-e), if it\ncannot retrieve the segment by streaming replication. This can happen after\na failover, or if the replication target has deleted the relevant segment if\nGeo is lagging behind it.\n\n\nIn the future, we will use this to experiment with [high-availability\nconfigurations of PostgreSQL on a secondary Geo\nnode](https://gitlab.com/groups/gitlab-org/-/epics/2536).\n\n\n## What we learned and how we can improve\n\n\nWe opened [23 issues before successfully rolling out Geo on our staging\nenvironment](https://gitlab.com/groups/gitlab-org/-/epics/1908) - this is\ntoo many. We know that installing and configuring Geo in complex\nenvironments is time-consuming and error-prone, and is an area where we can\nimprove. The current process for a self-managed installation requires [more\nthan 70 individual\nsteps](https://gitlab.com/gitlab-org/gitlab-design/issues/731) - this is too\nmuch. 
[Geo should be simple to\ninstall](https://gitlab.com/groups/gitlab-org/-/epics/1465) and we aim to\nreduce the number of steps to below 10. Using Geo ourselves really\nunderscored the importance of improvements in this area.\n\n\n### Some Geo PostgreSQL queries don't perform well\n\n\nGeo uses PostgreSQL Foreign Data Wrappers (FDW) to perform some\ncross-database queries between the secondary replica and the tracking\ndatabase. FDW queries are quite elegant but have lead to some issues in the\npast. Specifically, staging is still running PostgreSQL 9.6, and Geo\nbenefits from some FDW improvements available only in PostgreSQL 10 and\nlater, such as join push-down and aggregate push-down.\n\n\nWhile enabling Geo on staging, some FDW queries timed out during the\nbackfill phase. Until staging is being upgraded to a newer version of\nPostgreSQL, increasing the statement timeout to 20 minutes on the Geo\nsecondary node was sufficient to allow us to proceed with the backfill.\n\n\nAs a direct consequence of enabling GitLab on staging, we are working to\n[improve Geo scalability by simplifying backfill\noperations](https://gitlab.com/groups/gitlab-org/-/epics/2851), eliminating\nthese cross-database queries, and removing the FDW requirement. We also plan\nto [upgrade to PostgreSQL 11 in GitLab\n13.0](https://gitlab.com/groups/gitlab-org/-/epics/2414).\n\n\n### Bug fixes\n\nWe've also discovered and fixed a number of bugs in the process, such as\n[failing to synchronize uploads with missing mount\npoints](https://gitlab.com/gitlab-org/gitlab/-/issues/209752), [invalid\nActiveRecord\noperations](https://gitlab.com/gitlab-org/gitlab/-/issues/210589), and\n[excessively re-synchronizing files in some\nsituations](https://gitlab.com/gitlab-org/gitlab/-/issues/207808).\n\n\n## What's next?\n\nWe are already providing value to our customers by enabling Geo on staging\nbecause the Geo team can test and validate Geo at scale at lot easier. 
Next\nup is enabling [automatic runs of our end-to-end test on\nstaging](https://gitlab.com/gitlab-org/quality/team-tasks/issues/385), which\nwould reduce the manual testing burden even further. There are also some\nother improvements, such as [enabling high-availability configurations of\nPostgreSQL using Patroni on Geo\nnodes](https://gitlab.com/groups/gitlab-org/-/epics/2536) that we would like\nto test on staging.\n\n\nEven though enabling Geo on staging is already very useful, it is just a\nstep forward to rolling out Geo on GitLab.com in production. We are\ncurrently evaluating the business case for enabling Geo on GitLab.com as\npart of our disaster recovery strategy and for geo replication.\n\n\nCover image by [Donald Giannatti](https://unsplash.com/photos/4qk3nQI3WHY)\non [Unsplash](https://www.unsplash.com)\n\n{: .note}\n",[697,722,9],{"slug":834,"featured":6,"template":700},"geo-is-available-on-staging-for-gitlab-com","content:en-us:blog:geo-is-available-on-staging-for-gitlab-com.yml","Geo Is Available On Staging For Gitlab Com","en-us/blog/geo-is-available-on-staging-for-gitlab-com.yml","en-us/blog/geo-is-available-on-staging-for-gitlab-com",{"_path":840,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":841,"content":847,"config":856,"_id":858,"_type":13,"title":859,"_source":15,"_file":860,"_stem":861,"_extension":18},"/en-us/blog/get-started-with-microservices-architecture",{"title":842,"description":843,"ogTitle":842,"ogDescription":843,"noIndex":6,"ogImage":844,"ogUrl":845,"ogSiteName":686,"ogType":687,"canonicalUrls":845,"schema":846},"Get started with microservices architecture","For DevOps teams ready to take the next step, adopting a microservices architecture is a smart choice. 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667875/Blog/Hero%20Images/trends-in-version-control-land-microservices-cover.jpg","https://about.gitlab.com/blog/get-started-with-microservices-architecture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with microservices architecture\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-09-20\",\n      }",{"title":842,"description":843,"authors":848,"heroImage":844,"date":850,"body":851,"category":852,"tags":853},[849],"GitLab","2022-09-20","A great way to jumpstart a DevOps practice is by adopting a microservices architecture. The [benefits of a microservices architecture](/blog/what-are-the-benefits-of-a-microservices-architecture/) are numerous and include improved scalability, enhanced fault isolation, and the ability to bring new features to market faster.\n\n## How to start building with microservices architecture\n\n### Identify decomposable aspects of the application\n\nOne of the main properties of a microservice is its independence, so identifying the decomposable parts of the application — those parts that can work autonomously — is essential. Getting the service boundaries wrong could result in unwanted changes to other services, so you need to understand the system’s domain.\n\nIn many cases, such breakdown aligns with the business domains and is reflected in development teams.\n\n### Determine the metrics to monitor\n\nWith a microservices application, it’s crucial to monitor the status of each service so it’s possible to react to changing demands in the production environment. Some common metrics to monitor include the CPU and memory usage of each host, the API response time, and the error rate.\n\nWithout monitoring, teams may not catch problems when they arise. 
For example, if a server is overwhelmed by traffic, other services may not respond because they’re trying to communicate with an over-burdened service. \n\nBeing able to visualize these potential issues helps prevent downtime. Therefore, establish metrics early so necessary adjustments can be made as soon as possible.\n\n## Best practices for deploying and managing microservices\n\n### Infrastructure automation\n\nWhen the number of microservices grows, an application can become difficult to manage. Each microservice has its own deployment schedule. \n\nSome features are hidden behind feature flags, some are collecting usage data through A/B testing, and some services might be using Canary deployments as part of a progressive deployment. \n\nAutomated testing is key so teams will have the ability to stop or roll back deployment when necessary.\n\n### Consumer-driven contract tests\n\nWhen other consumers depend on API endpoints in one microservice, it’s good practice to implement consumer-driven contract testing to ensure version compatibility. \n\nTraditionally, developers first create the APIs on the server side and have clients determine which endpoints to call. That means when the signature of an API changes, it can bring down the consumer.\n\nThis can’t happen with consumer-driven contract testing because, before deploying a microservice to production, consumers determine the required contract (API signature) and test to be sure they are still valid.\n\n### Monitor key metrics\n\nOnce key metrics have been determined, they must be constantly monitored and able to respond to any events detected. This can be difficult, but fortunately, there are tools that simplify monitoring and provide comprehensive visualization.\n\n## Microservices architecture and DevOps\n\nBy decomposing a software system into autonomous parts, [microservices architecture](/topics/microservices/) allows companies to apply the single responsibility principle to individual teams. 
It allows them to manage all aspects of a service independently: the team’s technical stack, team composition, deployment strategies, and even release schedules.\n\nMicroservices architecture, alongside continuous delivery, allows businesses to make decisions based on live production data, thereby expediting feedback loops and reducing the time to market.\n\nTo get started with microservices architecture, it’s a good idea to first develop strong intuitions in decomposing a large system and get a good knowledge base of CI/CD practices. Regardless of the architectural style you choose, these skills will be useful.","devsecops",[854,855,9],"DevOps","testing",{"slug":857,"featured":6,"template":700},"get-started-with-microservices-architecture","content:en-us:blog:get-started-with-microservices-architecture.yml","Get Started With Microservices Architecture","en-us/blog/get-started-with-microservices-architecture.yml","en-us/blog/get-started-with-microservices-architecture",{"_path":863,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":864,"content":869,"config":875,"_id":877,"_type":13,"title":878,"_source":15,"_file":879,"_stem":880,"_extension":18},"/en-us/blog/getting-started-gitlab-ci-gcp",{"title":865,"description":866,"ogTitle":865,"ogDescription":866,"noIndex":6,"ogImage":801,"ogUrl":867,"ogSiteName":686,"ogType":687,"canonicalUrls":867,"schema":868},"Getting started with GitLab CI/CD and Google Cloud Platform","Discover how easy it is to set up CI/CD and Kubernetes deployment with our integration with Google Kubernetes Engine.","https://about.gitlab.com/blog/getting-started-gitlab-ci-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab CI/CD and Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-04-24\",\n      
}",{"title":865,"description":866,"authors":870,"heroImage":801,"date":871,"body":872,"category":695,"tags":873},[849],"2018-04-24","\n\nEarlier this month [we announced our new native integration with Google Kubernetes Engine (GKE)](/blog/gke-gitlab-integration/),\nallowing you to [set up CI/CD](/topics/ci-cd/) and Kubernetes deployment in just a few clicks. If you're new to\nGitLab CI on Google Cloud Platform (GCP), we've put together a quick [demo](#demo) and [instructions](#instructions) you can view below. For a more detailed walkthrough and the chance to ask questions, join us on April 26 for a [live demo](#join-google-and-gitlab-for-a-live-demo).\n\n## Demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/u3jFf3tTtMk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Instructions\n\n### Add a Kubernetes Engine cluster\n\nHead on over to the CI/CD -> Kubernetes menu option in the GitLab UI. Here you can add your existing cluster to your project or create a brand new one.\n\n![Add your Kubernetes cluster](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step1.png){: .shadow.center.medium}\n\nOnce connected, you can install applications like [Helm Tiller](https://helm.sh/), [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), [Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/), and [GitLab Runner](https://docs.gitlab.com/ee/ci/runners/) to your cluster with just one click.\n\n![Install applications](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/install-applications.png){: .shadow.center.medium}\n\n### Enable Auto DevOps\n\nWe've also worked with Google to integrate [GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) with GKE. 
Using them together, you'll have a continuous deployment pipeline that automatically creates a [review app](https://docs.gitlab.com/ee/ci/review_apps/) for each merge request and once you merge, deploys the application into production on production-ready GKE.\n\nTo get started, go to CI/CD -> General pipeline settings, and select “Enable Auto DevOps.” For more information, read the [Auto DevOps docs](https://docs.gitlab.com/ee/topics/autodevops/).\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step2.png){: .shadow.center.medium}\n\nAuto DevOps takes the manual work out of CI/CD by automatically detecting what languages you’re using, and configuring a continuous integration and continuous deployment pipeline that results in your app running live on the Kubernetes Engine cluster.\n\n![Review pipeline](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step3.png){: .shadow.center.medium}\n\nNow, whenever you create a merge request, we'll run a review pipeline to deploy a review app to your cluster where you can preview your changes. When you merge the code, GitLab will run a production pipeline to deploy your app to production, running on Kubernetes Engine!\n\n## Get $500 credit for your project\n\nEvery new Google Cloud Platform account receives $300 in credit [upon signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, we're offering an additional $200 for both new and existing GCP accounts to get started with the GKE integration. Here's a link to [apply for your $200 credit](https://goo.gl/AaJzRW).\n\n## Join Google and GitLab for a live demo\n\nJoin Google’s [William Denniss](https://www.linkedin.com/in/williamdenniss/) and GitLab’s [William Chia](https://www.linkedin.com/in/williamchia/) for a walkthrough of the integration on April 26. 
You’ll learn how easy it is to set up a Kubernetes cluster, how to deploy your app using GitLab CI/CD, and how GKE enables you to deploy, update, and manage containerized applications at scale.\n\n[Register today](/webcast/scalable-app-deploy/)!\n",[9,810,811,874],"demo",{"slug":876,"featured":6,"template":700},"getting-started-gitlab-ci-gcp","content:en-us:blog:getting-started-gitlab-ci-gcp.yml","Getting Started Gitlab Ci Gcp","en-us/blog/getting-started-gitlab-ci-gcp.yml","en-us/blog/getting-started-gitlab-ci-gcp",{"_path":882,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":883,"content":889,"config":897,"_id":899,"_type":13,"title":900,"_source":15,"_file":901,"_stem":902,"_extension":18},"/en-us/blog/gitlab-and-google-together-at-google-cloud-next-23",{"title":884,"description":885,"ogTitle":884,"ogDescription":885,"noIndex":6,"ogImage":886,"ogUrl":887,"ogSiteName":686,"ogType":687,"canonicalUrls":887,"schema":888},"GitLab and Google together at Google Cloud Next '23","Here's a roundup of all the GitLab events and announcements at the Next ‘23 conference.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679290/Blog/Hero%20Images/gitlabgooglecloud.png","https://about.gitlab.com/blog/gitlab-and-google-together-at-google-cloud-next-23","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Google together at Google Cloud Next '23\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nima Badiey\"}],\n        \"datePublished\": \"2023-08-22\",\n      }",{"title":884,"description":885,"authors":890,"heroImage":886,"date":892,"body":893,"category":894,"tags":895},[891],"Nima Badiey","2023-08-22","\nAfter a pandemic-related hiatus from in-person events, we’re excited that Google Cloud Next ‘23 is back in person Aug. 29 - 31 at the Moscone Center in San Francisco – and GitLab will be there. 
Next ’23  promises to be a packed event, with exciting announcements and new product introductions from Google and its partners. \n\nIf you’re going to Next ‘23, here’s a quick summary of where to find GitLab at the event, including speaking sessions, our booth in the expo hall, and our storefront, to learn more about the [most comprehensive AI-powered DevSecOps Platform](https://about.gitlab.com). And don’t forget to check out the [GitLab at Next '23 event page](https://about.gitlab.com/events/) for updates and invites!\n\n### All week\nJoin us at our booth #633 on the expo floor to meet the GitLab team and learn how GitLab and Google Cloud are partnering to deliver secure, enterprise-grade AI. Talk to DevSecOps experts, dive into our new AI capabilities built directly into the platform, and learn best practices you can apply to your own environment. Get all your technical questions answered, and let us know what features you'd like to see in the GitLab platform!\n* We also have a Pop-Up Meeting Experience at the 4th Street Entrance to the [Metreon](https://www.shoppingmetreon.com/). Our team is providing demos and Q&A for [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered capabilities that can enhance your workflows throughout the software development lifecycle. Register for a coffee chat in our exclusive gathering space to start your day off right!\n* GitLab team members are available all week to meet customers, partners, and fellow Google sellers, so be sure to ask your GitLab sales representatives who and how to connect with them in person.\n\n### Tuesday, Aug. 29\n* If you’re attending the Executive Women’s Network breakfast, be sure to say hi to our Patty Cheung, Vice President of Sales for Channel and Alliances. 
Patty recently joined the GitLab team and is focusing her team on helping customers leverage GitLab’s extensive partner ecosystem to adopt, scale, and grow their businesses on GitLab’s AI-powered DevSecOps Platform.\n* Grab a seat early and as close to the stage as you can because you don’t want to miss out on Google Cloud CEO Thomas Kurian’s opening keynote. You’ll learn how GitLab is building our latest AI-assisted services, such as Explain this Vulnerability and Code Suggestions on Google’s PaLM2-based VertexAI and Codey upgrades.\n* Join us on August 29th at 6:30 p.m. for a Happy Hour at the GitLab storefront at the Metreon before the rest of the evening’s festivities. Don’t forget to [grab an invite](https://page.gitlab.com/20230829-google-cloud-next-meetings-happy-hour.html) before you come.\n\n### Wednesday, Aug. 30\n* Make sure to [register for our Aug. 30th breakfast](https://page.gitlab.com/event_august30_googlenextexecbreakfast_sanfrancisco.html?utm_medium=corpmktg&utm_source=marketo&utm_campaign=googlenextbreakfast20230830&utm_content=ownedevent&utm_budget=fmm) from 8:30-10AM at the GitLab Storefront in the Metreon with lightning talks on key AI initiatives!\n* GitLab’s Chief Product Officer David DeSanto will join Google’s June Yang, vice president of Cloud AI and Industry Solutions, at the first spotlight session (SPTL200)  “[What's new with generative AI at Google Cloud](https://cloud.withgoogle.com/next/session-library?session=SPTL200#partner-summit)” at 1:30 p.m. to share how GitLab has been able to accelerate our AI-based product features by leveraging Google’s Vertex and Codey frameworks.\n* After the session, David will join Google’s Gabe Monroy, vice president of Developer Experience, at the Application Developers spotlight (SPTL201) “[What's next for application developers](https://cloud.withgoogle.com/next/session-library?session=SPTL201#partner-summit)” at 4:45 p.m. 
They will share some exciting updates on how Google and GitLab are expanding their partnership.\n\n### Thursday, Aug. 31\n* We’ll get more hands on with two panel sessions where audience members will hear from GitLab and Google product leads and get a chance to ask questions and interact with the teams.\n* Starting at 9:00 a.m., Mike Flouton, GitLab vice president of Product, will join Google’s Parashar Shah, product manager for Vertex AI and Codey APIs, on a panel at the (ai-ml208) “[Accelerate software development with Vertex AI’s Codey APIs](https://cloud.withgoogle.com/next/session-library?session=ai-ml208#partner-summit)” session. They will discuss how GitLab uses Google’s many AI tools and frameworks to build Explain this Vulnerability and Code Suggestions on Google’s PaLM2-based VertexAI and Codey upgrades, just a small example of the 15+ new AI-assisted features we have planned. \n* At 10:15 a.m., join Hillary Benson, senior director of Product at GitLab, for a panel with Google team members, including Stephanie Wong, product manager for Google Cloud’s Duet AI. 
As part of the continuing series on all things AI, the (ai-ml214) “[Prompt engineering: Getting the skill your team needs next](https://cloud.withgoogle.com/next/session-library?session=ai-ml214#partner-summit)” session will discuss how prompt engineering can impact knowledge workers' success in delivering improved productivity and better outcomes.\n\nIf you’re a GitLab partner, drop me a line via LinkedIn and let’s connect in person at Next ‘23!\n","news",[894,896,9,726],"security",{"slug":898,"featured":6,"template":700},"gitlab-and-google-together-at-google-cloud-next-23","content:en-us:blog:gitlab-and-google-together-at-google-cloud-next-23.yml","Gitlab And Google Together At Google Cloud Next 23","en-us/blog/gitlab-and-google-together-at-google-cloud-next-23.yml","en-us/blog/gitlab-and-google-together-at-google-cloud-next-23",{"_path":904,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":905,"content":911,"config":916,"_id":918,"_type":13,"title":919,"_source":15,"_file":920,"_stem":921,"_extension":18},"/en-us/blog/gitlab-at-next-25-transforming-app-modernization",{"title":906,"description":907,"ogTitle":906,"ogDescription":907,"noIndex":6,"ogImage":908,"ogUrl":909,"ogSiteName":686,"ogType":687,"canonicalUrls":909,"schema":910},"GitLab at Next '25: Transforming app modernization","GitLab participated in Google Cloud Next ‘25 and received a fifth consecutive Google Cloud Technology Partner of the Year recognition.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663121/Blog/Hero%20Images/LogoLockupPlusLight.png","https://about.gitlab.com/blog/gitlab-at-next-25-transforming-app-modernization","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab at Next '25: Transforming app modernization\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2025-04-11\",\n      
}",{"title":906,"description":907,"authors":912,"heroImage":908,"date":913,"body":914,"category":894,"tags":915},[763],"2025-04-11","GitLab's presence at Google Cloud Next '25 highlighted our strong partnership with Google Cloud and our joint commitment to accelerating software development and delivery. We were recognized again as a Technology Partner of the Year, and included in key enterprise initiatives like Google Distributed Cloud (GDC) Build Partners and [Startup Perks from Google Cloud](https://cloud.google.com/blog/topics/startups/why-global-startups-are-gathering-at-google-cloud-next25?e=13802955). Our team members demonstrated for attendees how GitLab is positioned to be a critical DevSecOps service for Google Cloud customers.\n\n## Continuing our award-winning partnership excellence\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175937/Blog/nempa4yvfutedz3fpuxx.jpg\" alt=\"GitLab team at Google Cloud Next '25\" align=\"left\" width=\"400px\" style=\"padding-right: 20px; padding-bottom: 10px\"/>\n\nWe're thrilled to announce that GitLab has once again been named a [Google Cloud Technology Partner of the Year award winner](https://about.gitlab.com/press/releases/2025-04-08-gitlab-wins-a-google-cloud-technology-partner-of-the-year-award-for-devops/), marking our fifth consecutive time receiving this prestigious honor. This remarkable achievement reaffirms our position as Google Cloud's primary DevOps partner, consistently delivering exceptional value year after year. The continued recognition highlights how our collaboration with Google Cloud creates tangible business outcomes for customers, enabling organizations across industries to build, secure, and deploy applications with efficiency and confidence.\n\n## Google Distributed Cloud: DevSecOps for highly regulated environments\n\nAnother significant milestone announced at Next '25 was GitLab's \"Google Cloud Ready - Distributed Cloud\" certification. 
This designation enables organizations to implement GitLab in air-gapped environments, addressing critical security and compliance requirements.\n\nAs an end-to-end DevSecOps solution available on Google Distributed Cloud, GitLab enables sovereign development and operations for workloads critical to national security and regulatory compliance. This integration is particularly valuable for government agencies and financial institutions that require the highest levels of data sovereignty while maintaining modern development practices.\n\n## GitLab perks for Google Startups\n\nGitLab is a Featured Partner of the new Startup Perks program from Google Cloud. This partnership ties up with our own [GitLab for Startups](https://about.gitlab.com/solutions/startups/google-cloud/) and is meant to jumpstart new tech ventures with key DevSecOps capabilities that can help with fast growth and scaling.\n\nAs one of the [Featured Perks partners](https://cloud.google.com/startup/perks), eligible startups can get free or discounted access to one year of [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) for 20 licenses. 
For seed or early stage startups, this benefit can help ensure collaboration, efficiency, and security without sacrificing speed and agility.\n\n## Thoughts from the dais\n\nGitLab experts shared valuable insights across multiple speaking sessions at Next '25, delivering practical knowledge on AI-powered DevSecOps, platform engineering, and cloud application delivery:\n\n* __[AI DevOps panel](https://cloud.withgoogle.com/next/25/session-library?session=BRK2-163&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ Mike Flouton, GitLab Vice President of Product Management, joined industry leaders to discuss how AI code assist tools boost productivity while enhancing application performance.\n\n* __[Software Logistics - The Missing Link in Modern Platform Engineering](https://cloud.withgoogle.com/next/25/session-library?session=CT2-16&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ GitLab Field CTO Lee Faus explored how effective software logistics create the foundation for successful platform engineering initiatives.\n\n* __[Revolutionizing Cloud Application Delivery with Intelligent Agents](https://cloud.withgoogle.com/next/25/session-library?session=CT2-17&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ Faus also demonstrated how intelligent agents are transforming cloud application delivery pipelines.\n\n## Engaging attendees across Next '25\n\nIn addition to our speaking sessions, GitLab maintained a strong presence throughout Next '25. 
At our booth #2170 on the expo floor, our team engaged with hundreds of attendees through demonstrations and lightning talks featuring both GitLab experts and partners like Arctiq and SADA.\n\nThe Google Cloud Makerspace's Dev Tools Pantry became a hub of innovation and collaboration. John Coghlan, Director of Developer Advocacy, observed: \"It was great to connect with many GitLab and Google Cloud customers in the Dev Tools Pantry in the Makerspace. We loved seeing the creative solutions that people came up with around developer experience and simplified deployments using GitLab and Google Cloud as their ingredients.\"\n\nThese hands-on experiences showcased how GitLab's DevSecOps solutions integrate well with Google Cloud services, with our AI-powered capabilities demonstrations drawing particular interest from attendees looking to enhance developer productivity and application security.\n\n## GitLab and Google Cloud: Transforming the future together\n\nThe energy witnessed at Next '25 exemplifies why GitLab and Google Cloud make such powerful partners. Together, we help organizations to transform how they build, secure, and deploy applications through:\n\n* AI-assisted development capabilities and collaborative workflows that can help accelerate innovation in Google Cloud environments\n\n* Shift-left security approach that integrates with Google Cloud's security-first architecture to identify vulnerabilities early in the development lifecycle\n\n* Flexible deployment options and comprehensive observability that work harmoniously with Google Cloud infrastructure to help streamline operations\n\nAs demonstrated at Next '25, the GitLab and Google Cloud partnership delivers tangible advantages for development teams facing real-world challenges – whether accelerating AI adoption, strengthening security in regulated environments, or streamlining complex deployment pipelines. 
The technical integration points and customer success stories shared throughout the event underscore that this collaboration continues to produce practical solutions that matter.\n\n> #### Discover how GitLab and Google Cloud can transform your application development experience at [GitLab's Google Cloud partnership page](https://about.gitlab.com/partners/technology-partners/google-cloud-platform/).",[9,495,278,283,894],{"slug":917,"featured":6,"template":700},"gitlab-at-next-25-transforming-app-modernization","content:en-us:blog:gitlab-at-next-25-transforming-app-modernization.yml","Gitlab At Next 25 Transforming App Modernization","en-us/blog/gitlab-at-next-25-transforming-app-modernization.yml","en-us/blog/gitlab-at-next-25-transforming-app-modernization",{"_path":923,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":924,"content":930,"config":936,"_id":938,"_type":13,"title":939,"_source":15,"_file":940,"_stem":941,"_extension":18},"/en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"title":925,"description":926,"ogTitle":925,"ogDescription":926,"noIndex":6,"ogImage":927,"ogUrl":928,"ogSiteName":686,"ogType":687,"canonicalUrls":928,"schema":929},"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less","Install GitLab's Runner on GKE in a few simple steps and get started with GitLab CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667003/Blog/Hero%20Images/gke_in_15_cover_2.jpg","https://about.gitlab.com/blog/gitlab-ci-on-google-kubernetes-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Elliot Rushton\"}],\n        \"datePublished\": \"2020-03-27\",\n      }",{"title":925,"description":926,"authors":931,"heroImage":927,"date":933,"body":934,"category":695,"tags":935},[932],"Elliot Rushton","2020-03-27","If 
you use [GitLab Self-Managed](/pricing/#self-managed), then getting started with GitLab CI using [GitLab's integration with Google Kubernetes Engine (GKE)](/partners/technology-partners/google-cloud-platform/) can be accomplished in a few simple steps. We have several blog posts and documentation that provide detailed [setup instructions for working with Kubernetes clusters](#other-resources). In this post, we highlight the essential steps so that you can get going with GitLab CI/CD in less than 15 minutes.\n\nBy using the GitLab and GKE integration, with one click, you install GitLab Runners on GKE and immediately start running your CI pipelines. Runners are the lightweight agents that execute the CI jobs in your [GitLab CI/CD](/topics/ci-cd/) pipeline.\n\n## Prerequisites:\n\nThe following pre-requisities will need to have been configured in order for you to use the built in GitLab GKE integration:\n- GitLab instance installed and configured with user credentials\n- [Google OAuth2 OmniAuth Provider](https://docs.gitlab.com/ee/integration/google.html) installed and configured on your GitLab instance\n- A Google Cloud project with the following [APIs enabled](https://docs.gitlab.com/ee/integration/google.html#enabling-google-oauth):\n  - Google Kubernetes Engine API\n  - Cloud Resource Manager API\n  - Cloud Billing API\n\n## Get started\n\n![Setup pipeline](https://about.gitlab.com/images/blogimages/ci-gke-in-15/gke_in_15_pipeline.png){: .shadow.medium.center}\n\n### Step 1\n\nWe’re going to add a shared runner at the instance level. 
First, as an administrator, click the “Admin Area” icon\n\n![Runner setup step 1](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_001.png){: .shadow.medium.center}\n\nThen on the left menu, select “Kubernetes”\n\n![Runner setup step 2](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_002.png){: .shadow.medium.center}\n\n### Step 2\n\nClick the green “Add Kubernetes cluster” button.\n\n![Runner setup step 3](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_003.png){: .shadow.medium.center}\n\n### Step 3\n\nThe screen to “Add a Kubernetes cluster integration” should come up. Click on the “Google GKE” icon on the right.\n\n![Runner setup step 4](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_004.png){: .shadow.medium.center}\n\n### Step 4\n\nGive your cluster a name, and select a “Google Cloud Platform project” from your linked GCP account. If no projects are populated in the menu then either your Google OAUTH2 integration isn’t configured correctly or your project is missing the needed permissions. Check that these are set up and that the [APIs mentioned in the prerequisites above](#prerequisites) are enabled.\n\nChoose a zone in which to run your cluster. For the purposes of running CI, the number of nodes in your cluster is going to be how many simultaneous jobs you can run at given time. As we are using the built-in GitLab Google Kubernetes integration, you can set a maximum of four nodes.\nHere we set that to three.\n\nClick “Create Kubernetes Cluster”\n\n![Runner setup step 5](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_005.png){: .shadow.medium.center}\n\nIt takes a few minutes for the cluster to be created. While it’s happening you should see a screen like this. 
You can leave this screen and come back (by going to “Admin Area> Kubernetes > [your cluster name]”)\n\n![Runner setup step 6](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_006.png){: .shadow.medium.center}\n\n### Step 5\n\nOnce the cluster has been created, we need to install two applications. First, install “Helm Tiller” by clicking on the “Install” button next to it.\n\n![Runner setup step 7](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_007.png){: .shadow.medium.center}\n\nThis takes a moment, but should be much quicker than creating the cluster initially was.\n\n![Runner setup step 8](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_008.png){: .shadow.medium.center}\n\n### Step 6\n\nNow that Helm Tiller is installed, more applications can be installed. For this tutorial we only need to install the “GitLab Runner” application. Click the install button next to GitLab Runner.\n\n![Runner setup step 9](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_009.png){: .shadow.medium.center}\n\nAgain, this should go pretty quickly.\n\n![Runner setup step 10](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_010.png){: .shadow.medium.center}\n\nOnce done, the button will change to an “Uninstall” button. You’re now set up with shared runners on your GitLab instance and can run your first CI pipeline!\n\n![Runner setup step 11](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_011.png){: .shadow.medium.center}\n\n### Next steps\n\nNow that you are up and running with GitLab CI/CD on GKE, you can build and run your first GitLab CI/CD pipeline. 
Here are links to a few resources to get you started.\n\n- [Getting Started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)\n- [How to build a CI/CD pipeline in 20 minutes or less](/blog/building-a-cicd-pipeline-in-20-mins/)\n- [Getting started with Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\n\nIf you are planning to manage your own fleet of GitLab Runners, then you may also be thinking about how best to set up autoscaling of GitLab Runners. As we have just set up your first Runner on GKE, then you can review the [GitLab Runner Kubernetes Executor docs](https://docs.gitlab.com/runner/executors/kubernetes.html) for additional details as to how the GitLab Runner uses Kubernetes to run builds on a Kubernetes cluster.\n\n### Other resources\n\n- [Scalable app depoyment webcast](https://about.gitlab.com/webcast/scalable-app-deploy/)\n- [Install GitLab on a cloud native environment](https://docs.gitlab.com/charts/)\n- [Adding and removing Kubernetes clusters](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html)\n- [Deploy production-ready GitLab on Google Kubernetes Engine](https://cloud.google.com/solutions/deploying-production-ready-gitlab-on-gke)\n\nCover image by [Agê Barros](https://unsplash.com/photos/rBPOfVqROzY) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[232,811,789,108,810,9],{"slug":937,"featured":6,"template":700},"gitlab-ci-on-google-kubernetes-engine","content:en-us:blog:gitlab-ci-on-google-kubernetes-engine.yml","Gitlab Ci On Google Kubernetes 
Engine","en-us/blog/gitlab-ci-on-google-kubernetes-engine.yml","en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"_path":943,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":944,"content":949,"config":955,"_id":957,"_type":13,"title":958,"_source":15,"_file":959,"_stem":960,"_extension":18},"/en-us/blog/gitlab-com-stability-post-gcp-migration",{"title":945,"description":946,"ogTitle":945,"ogDescription":946,"noIndex":6,"ogImage":801,"ogUrl":947,"ogSiteName":686,"ogType":687,"canonicalUrls":947,"schema":948},"What's up with GitLab.com? Check out the latest data on its stability","Let's take a look at the data on the stability of GitLab.com from before and after our recent migration from Azure to GCP, and dive into why things are looking up.","https://about.gitlab.com/blog/gitlab-com-stability-post-gcp-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What's up with GitLab.com? Check out the latest data on its stability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-10-11\",\n      }",{"title":945,"description":946,"authors":950,"heroImage":801,"date":952,"body":953,"category":695,"tags":954},[951],"Andrew Newdigate","2018-10-11","\nThis post is inspired by [this comment on Reddit](https://www.reddit.com/r/gitlab/comments/9f71nq/thanks_gitlab_team_for_improving_the_stability_of/),\nthanking us for improving the stability of GitLab.com. Thanks, hardwaresofton! Making GitLab.com\nready for your mission-critical workloads has been top of mind for us for some time, and it's\ngreat to hear that users are noticing a difference.\n\n_Please note that the numbers in this post differ slightly from the Reddit post as the data has changed since that post._\n\nWe will continue to work hard on improving the availability and stability of the platform. 
Our\ncurrent goal is to achieve 99.95 percent availability on GitLab.com – look out for an upcoming\npost about how we're planning to get there.\n\n## GitLab.com stability before and after the migration\n\nAccording to [Pingdom](http://stats.pingdom.com/81vpf8jyr1h9), GitLab.com's availability for the year to date, up until the migration was **[99.68 percent](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=F2)**, which equates to about 32 minutes of downtime per week on average.\n\nSince the migration, our availability has improved greatly, although we have much less data to compare with than in Azure.\n\n![Availability Chart](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=458170195&format=image)\n\nUsing data publicly available from Pingdom, here are some stats about our availability for the year to date:\n\n| Period                                 | Mean-time between outage events |\n| -------------------------------------- | ------------------------------- |\n| Pre-migration (Azure)                  | **1.3 days**                    |\n| Post-migration (GCP)                   | **7.3 days**                    |\n| Post-migration (GCP) excluding 1st day | **12 days**                     |\n\nThis is great news: we're experiencing outages less frequently. 
What does this mean for our availability, and are we on track to achieve our goal of 99.95 percent?\n\n| Period                    | Availability                                                                                                                   | Downtime per week |\n| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------- |\n| Pre-migration (Azure)     | **[99.68%](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=F2)**  | **32 minutes**    |\n| Post-migration (GCP)      | **[99.88 %](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=B3)** | **13 minutes**    |\n| Target – not yet achieved | **99.95%**                                                                                                                     | **5 minutes**     |\n\nDropping from 32 minutes per week average downtime to 13 minutes per week means we've experienced a **61 percent improvement** in our availability following our migration to Google Cloud Platform.\n\n## Performance\n\nWhat about the performance of GitLab.com since the migration?\n\nPerformance can be tricky to measure. In particular, averages are a terrible way of measuring performance, since they neglect outlying values. One of the better ways to measure performance is with a latency histogram chart. To do this, we imported the GitLab.com access logs for July (for Azure) and September (for Google Cloud Platform) into [Google BigQuery](https://cloud.google.com/bigquery/), then selected the 100 most popular endpoints for each month and categorised these as either API, web, git, long-polling, or static endpoints. 
Comparing these histograms side-by-side allows us to study how the performance of GitLab.com has changed since the migration.\n\n![GitLab.com Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/azure_v_gcp_latencies.gif)\n\nIn this histogram, higher values on the left indicate better performance. The right of the graph is the \"_tail_\", and the \"_fatter the tail_\", the worse the user experience.\n\nThis graph shows us that with the move to GCP, more requests are completing within a satisfactory amount of time.\n\nHere's two more graphs showing the difference for API and Git requests respectively.\n\n![API Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/api-performance-histogram.png)\n\n![Git Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/git-performance-histogram.png)\n\n## Why these improvements?\n\nWe chose Google Cloud Platform because we believe that Google offer the most reliable cloud platform for our workload, particularly as we move towards running GitLab.com in [Kubernetes](/solutions/kubernetes/).\n\nHowever, there are many other reasons unrelated to our change in cloud provider for these improvements to stability and performance.\n\n> #### _“We chose Google Cloud Platform because we believe that Google offer the most reliable cloud platform for our workload”_\n\nLike any large SaaS site, GitLab.com is a large, complicated system, and attributing availability changes to individual changes is extremely difficult, but here are a few factors which may be effecting our availability and performance:\n\n### Reason #1: Our Gitaly Fleet on GCP is much more powerful than before\n\nGitaly is responsible for all Git access in the GitLab application. Before Gitaly, Git access occurred directly from within Rails workers. 
Because of the scale we run at, we require many servers serving the web application, and therefore, in order to share git data between all workers, we relied on NFS volumes. Unfortunately this approach doesn't scale well, which led to us building Gitaly, a dedicated Git service.\n\n> #### _“We've opted to give our fleet of 24 Gitaly servers a serious upgrade”_\n\n#### Our upgraded Gitaly fleet\n\nAs part of the migration, we've opted to give our fleet of 24 [Gitaly](/blog/the-road-to-gitaly-1-0/) servers a serious upgrade. If the old fleet was the equivalent of a nice family sedan, the new fleet are like a pack of snarling musclecars, ready to serve your Git objects.\n\n| Environment | Processor                       | Number of cores per instance | RAM per instance |\n| ----------- | ------------------------------- | ---------------------------- | ---------------- |\n| Azure       | Intel Xeon Ivy Bridge @ 2.40GHz | 8                            | 55GB             |\n| GCP         | Intel Xeon Haswell @ 2.30GHz    | **32**                       | **118GB**        |\n\nOur new Gitaly fleet is much more powerful. This means that Gitaly can respond to requests more quickly, and deal better with unexpected traffic surges.\n\n#### IO performance\n\nAs you can probably imagine, serving [225TB of Git data](https://dashboards.gitlab.com/d/ZwfWfY2iz/vanity-metrics-dashboard?orgId=1) to roughly half-a-million active users a week is a fairly IO-heavy operation. 
Any performance improvements we can make to this will have a big impact on the overall performance of GitLab.com.\n\nFor this reason, we've focused on improving performance here too.\n\n| Environment | RAID         | Volumes | Media    | filesystem | Performance                                                            |\n| ----------- | ------------ | ------- | -------- | ---------- | ---------------------------------------------------------------------- |\n| Azure       | RAID 5 (lvm) | 16      | magnetic | xfs        | 5k IOPS, 200MB/s (_per disk_) / 32k IOPS **1280MB/s** (_volume group_) |\n| GCP         | No raid      | 1       | **SSD**  | ext4       | **60k read IOPs**, 30k write IOPs, 800MB/s read 200MB/s write          |\n\nHow does this translate into real-world performance? Here are average read and write times across our Gitaly fleet:\n\n##### IO performance is much higher\n\nHere are some comparative figures for our Gitaly fleet from Azure and GCP. In each case, the performance in GCP is much better than in Azure, although this is what we would expect given the more powerful fleet.\n\n[![Disk read time graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=458168633&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172) [![Disk write time graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=884528549&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172) [![Disk Queue length 
graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=2135164979&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172)\n\nNote: For reference: for Azure, this uses the average times for the week leading up to the failover. For GCP, it's an average for the week up to October 2, 2018.\n\nThese stats clearly illustrate that our new fleet has far better IO performance than our old cluster. Gitaly performance is highly dependent on IO performance, so this is great news and goes a long way to explaining the performance improvements we're seeing.\n\n### Reason #2: Fewer \"unicorn worker saturation\" errors\n\n![HTTP 503 Status GitLab](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/facepalm-503.png)\n\nUnicorn worker saturation sounds like it'd be a good thing, but it's really not!\n\nWe ([currently](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/1899)) rely on [unicorn](https://bogomips.org/unicorn/), a Ruby/Rack http server, for serving much of the application. Unicorn uses a single-threaded model, which uses a fixed pool of workers processes. Each worker can handle only one request at a time. If the worker gives no response within 60 seconds, it is terminated and another process is spawned to replace it.\n\n> #### _“Unicorn worker saturation sounds like it'd be a good thing, but it's really not!”_\n\nAdd to this the lack of autoscaling technologies to ramp the fleet up when we experience high load volumes, and this means that GitLab.com has a relatively static-sized pool of workers to handle incoming requests.\n\nIf a Gitaly server experiences load problems, even fast [RPCs](https://en.wikipedia.org/wiki/Remote_procedure_call) that would normally only take milliseconds, could take up to several seconds to respond – thousands of times slower than usual. 
Requests to the unicorn fleet that communicate with the slow server will take hundreds of times longer than expected. Eventually, most of the fleet is handling requests to that affected backend server. This leads to a queue which affects all incoming traffic, a bit like a tailback on a busy highway caused by a traffic jam on a single offramp.\n\nIf the request gets queued for too long – after about 60 seconds – the request will be cancelled, leading to a 503 error. This is indiscriminate – all requests, whether they interact with the affected server or not, will get cancelled. This is what I call unicorn worker saturation, and it's a very bad thing.\n\nBetween February and August this year we frequently experienced this phenomenon.\n\nThere are several approaches we've taken to dealing with this:\n\n- **Fail fast with aggressive timeouts and circuitbreakers**: Timeouts mean that when a Gitaly request is expected to take a few milliseconds, they time out after a second, rather than waiting for the request to time out after 60 seconds. While some requests will still be affected, the cluster will remain generally healthy. Gitaly currently doesn't use circuitbreakers, but we plan to add this, possibly using [Istio](https://istio.io/docs/tasks/traffic-management/circuit-breaking/) once we've moved to Kubernetes.\n\n- **Better abuse detection and limits**: More often than not, server load spikes are driven by users going against our fair usage policies. We built tools to better detect this and over the past few months, an abuse team has been established to deal with this. 
Sometimes, load is driven through huge repositories, and we're working on reinstating fair-usage limits which prevent 100GB Git repositories from affecting our entire fleet.\n\n- **Concurrency controls and rate limits**: For limiting the blast radius, rate limiters (mostly in HAProxy) and concurrency limiters (in Gitaly) slow overzealous users down to protect the fleet as a whole.\n\n### Reason #3: GitLab.com no longer uses NFS for any Git access\n\nIn early September we disabled Git NFS mounts across our worker fleet. This was possible because Gitaly had reached v1.0: the point at which it's sufficiently complete. You can read more about how we got to this stage in our [Road to Gitaly blog post](/blog/the-road-to-gitaly-1-0/).\n\n### Reason #4: Migration as a chance to reduce debt\n\nThe migration was a fantastic opportunity for us to improve our infrastructure, simplify some components, and otherwise make GitLab.com more stable and more observable, for example, we've rolled out new **structured logging infrastructure**.\n\nAs part of the migration, we took the opportunity to move much of our logging across to structured logs. We use [fluentd](https://www.fluentd.org/), [Google Pub/Sub](https://cloud.google.com/pubsub/docs/overview), [Pubsubbeat](https://github.com/GoogleCloudPlatform/pubsubbeat), storing our logs in [Elastic Cloud](https://www.elastic.co/cloud) and [Google Stackdriver Logging](https://cloud.google.com/logging/). Having reliable, indexed logs has allowed us to reduce our mean-time to detection of incidents, and in particular detect abuse. This new logging infrastructure has also been invaluable in detecting and resolving several security incidents.\n\n> #### _“This new logging infrastructure has also been invaluable in detecting and resolving several security incidents”_\n\nWe've also focused on making our staging environment much more similar to our production environment. 
This allows us to test more changes, more accurately, in staging before rolling them out to production. Previously the team was maintaining\na limited scaled-down staging environment and many changes were not adequately tested before being rolled out. Our environments now share a common configuration and we're working to automate all [terraform](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5079) and [chef](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5078) rollouts.\n\n### Reason #5: Process changes\n\nUnfortunately many of the worst outages we've experienced over the past few years have been self-inflicted. We've always been transparent about these — and will continue to be so — but as we rapidly grow, it's important that our processes scale alongside our systems and team.\n\n> #### _“It's important that our processes scale alongside our systems and team”_\n\nIn order to address this, over the past few months, we've formalized our change and incident management processes. These processes respectively help us to avoid outages and resolve them quicker when they do occur.\n\nIf you're interested in finding out more about the approach we've taken to these two vital disciplines, they're published in our handbook:\n\n- [GitLab.com's Change Management Process](/handbook/engineering/infrastructure/change-management/)\n- [GitLab.com's Incident Management Process](/handbook/engineering/infrastructure/incident-management/)\n\n### Reason #6: Application improvement\n\nEvery GitLab release includes [performance and stability improvements](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&state=opened&label_name%5B%5D=performance); some of these have had a big impact on GitLab's stability and performance, particularly n+1 issues.\n\nTake Gitaly for example: like other distributed systems, Gitaly can suffer from a class of performance degradations known as \"n+1\" problems. 
This happens when an endpoint needs to make many queries (_\"n\"_) to fulfill a single request.\n\n> Consider an imaginary endpoint which queried Gitaly for all tags on a repository, and then issued an additional query for each tag to obtain more information. This would result in n + 1 Gitaly queries: one for the initial tag list, and then n for the tags. This approach would work fine for a project with 10 tags – issuing 11 requests, but for a project with 1000 tags, this would result in 1001 Gitaly calls, each with a round-trip time, and issued in sequence.\n\n![Latency drop in Gitaly endpoints](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/drop-off.png)\n\nUsing data from Pingdom, this chart shows long-term performance trends since the start of the year. It's clear that latency improved a great deal on May 7, 2018. This date happens to coincide with the RC1 release of GitLab 10.8, and its deployment on GitLab.com.\n\nIt turns out that this was due to a [single fix on n+1 on the merge request page being resolved](https://gitlab.com/gitlab-org/gitlab-ce/issues/44052).\n\nWhen running in development or test mode, GitLab now detects n+1 situations and we have compiled [a list of known n+1s](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=performance&label_name[]=Gitaly&label_name[]=technical%20debt). 
As these are resolved we expect even more performance improvements.\n\n![GitLab Summit - South Africa - 2018](https://about.gitlab.com/images/summits/2018_south-africa_team.jpg)\n\n### Reason #7: Infrastructure team growth and reorganization\n\nAt the start of May 2018, the Infrastructure team responsible for GitLab.com consisted of five engineers.\n\nSince then, we've had a new director join the Infrastructure team, two new managers, a specialist [Postgres DBRE](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/13778), and four new [SREs](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/). The database team has been reorganized to be an embedded part of infrastructure group. We've also brought in [Ongres](https://www.ongres.com/), a specialist Postgres consultancy, to work alongside the team.\n\nHaving enough people in the team has allowed us to be able to split time between on-call, tactical improvements, and longer-term strategic work.\n\nOh, and we're still hiring! If you're interested, check out [our open positions](/jobs/) and choose the Infrastructure Team 😀\n\n## TL;DR: Conclusion\n\n1. GitLab.com is more stable: availability has improved 61 percent since we migrated to GCP\n1. GitLab.com is faster: latency has improved since the migration\n1. 
We are totally focused on continuing these improvements, and we're building a great team to do it\n\nOne last thing: our Grafana dashboards are open, so if you're interested in digging into our metrics in more detail, visit [dashboards.gitlab.com](https://dashboards.gitlab.com) and explore!\n",[810,9,722,811,894,724],{"slug":956,"featured":6,"template":700},"gitlab-com-stability-post-gcp-migration","content:en-us:blog:gitlab-com-stability-post-gcp-migration.yml","Gitlab Com Stability Post Gcp Migration","en-us/blog/gitlab-com-stability-post-gcp-migration.yml","en-us/blog/gitlab-com-stability-post-gcp-migration",{"_path":962,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":963,"content":969,"config":975,"_id":977,"_type":13,"title":978,"_source":15,"_file":979,"_stem":980,"_extension":18},"/en-us/blog/gitlab-gke-autopilot",{"title":964,"description":965,"ogTitle":964,"ogDescription":965,"noIndex":6,"ogImage":966,"ogUrl":967,"ogSiteName":686,"ogType":687,"canonicalUrls":967,"schema":968},"How to use GitLab with GKE Autopilot","GitLab works out of the box with the new GKE Autopilot from Google Cloud, a managed variant of the popular Google Kubernetes Engine.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681920/Blog/Hero%20Images/kubernetes.png","https://about.gitlab.com/blog/gitlab-gke-autopilot","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab with GKE Autopilot\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2021-02-24\",\n      }",{"title":964,"description":965,"authors":970,"heroImage":966,"date":972,"body":973,"category":894,"tags":974},[971],"Abubakar Siddiq Ango","2021-02-24","\n\nIn the cloud native landscape, there are dozens of providers that offer managed Kubernetes services. 
Despite the abstraction, and ease of use promised, a major problem remains: getting the node size right. You want it to match your workloads so that you don’t under-provision – making the workloads unstable – or over-provision and rake in unnecessary costs. \n\n[GKE Autopilot from Google Cloud](https://cloud.google.com/blog/products/containers-kubernetes/introducing-gke-autopilot) solves this problem by enabling your team to focus on building your solutions with a fully managed and opinionated variant of Google Kubernetes Engine (GKE), where nodes are automatically provisioned based on your workload requirements and with no need to be managed independently. \n\nGKE Autopilot uses the resource specification in the PodSpec of your deployment to provision nodes or use defaults, automatically resize the nodes, or provision new nodes as the pods’ needs change. GitLab and Google Cloud officially support several use cases, including running GitLab and GitLab Runners as workloads on GKE Autopilot clusters, as well as using GitLab CI/CD to deploy applications onto GKE Autopilot.\n\n## GitLab and GKE Autopilot\n\n### GitLab Server\n\nGitLab can be installed on GKE Autopilot easily out of the box using the official Helm Charts and can be configured to match your company’s use case, such as external object storage and database. GKE Autopilot works to ensure the right sizes and number of nodes are provisioned based on the requirements specified in the GitLab charts and your customizations. You can access other resources in Google Cloud, such as storage and databases using Google Cloud Workload Identity.\n\nAll GKE Autopilot clusters come with Google Cloud Workload Identity pre-configured. Workload Identity allows you to bind Kubernetes Service Accounts to Google Service Accounts, with whatever permission that Google Service Account has. 
This can include resources in other Google Cloud platform projects.\n\nIn the first part of the GitLab with GKE Autopilot demo, I demonstrate how to install GitLab on a GKE Autopilot cluster:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/cNffh-qyXhQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### GitLab Runner\n\nThe GitLab Runner can be deployed on GKE Autopilot in unprivileged mode, allowing it to only run GitLab CI jobs that do not require privileged pods or Docker in Docker due to the lack of support for privileged pods on GKE Autopilot. To build container images, [Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html) or its likes can be used as an alternative to Docker. This applies to the bundled runner in the official GitLab Helm chart or when deployed independently using the official GitLab Runner chart. This also affects jobs using GitLab Auto DevOps, but works best when an independent Runner (set up on a GKE Standard cluster or virtual machine) is registered with the GitLab server running on GKE Autopilot.\n\n### Integrating GKE Autopilot clusters\n\nGKE Autopilot clusters integrate with GitLab just like a GKE Standard cluster. There are two options, the preferred of which is to use the [GitLab Agent for Kubernetes](/blog/gitlab-kubernetes-agent-on-gitlab-com/), especially if you are concerned about security or your cluster is behind a firewall. You can learn more about this in our [detailed documentation](https://docs.gitlab.com/ee/user/clusters/agent/).\nAlternatively, you can create a cluster-admin and provide the cluster certificate and token to [integrate with the cluster](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html).  As of the time of writing, GKE Autopilot clusters cannot be created from GitLab like standard GKE clusters. 
The DinD limitation also affects the runner listed in the GitLab-managed apps that you can install as part of the integration. \n\nIn the second part of the demo video, I demonstrate how to integrate GitLab with a GKE Autopilot cluster and deploy an application using Auto DevOps.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/rCwHL3hQEWU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Considerations\n\nGKE Autopilot is opinionated and less configurable than GKE Standard. As a managed service, it allows you to focus on delivering the best solutions to your users and not worry about operations; these are limitations common for such managed Kubernetes services. \n\nAdministrative access to the nodes provisioned by GKE Autopilot is not supported, thus making any operation requiring access to the nodes limited. Host options, node selectors, node affinity/anti-affinity, taints, and tolerations are other functionalities that apply at the node level in GKE Standard but are not supported in Autopilot.\n\nWhen integrating an Autopilot cluster with GitLab, you cannot install the bundled cert-manager. I encountered an error while testing, stating that `mutatingwebhookconfigurations/` is managed and access is denied in GKE Autopilot. Alternatively, you can follow the directions provided in the official cert-manager documentation.\n\n## Wrapping up\n\nGKE Autopilot is designed to implement Google Cloud-developed best practices and has been fine-tuned to provide an ideal user experience. You can move from idea to production and scale worry-free when you integrate GitLab with GKE Autopilot, allowing you to deploy and monitor your application’s health, all within GitLab. 
If you also choose to deploy GitLab itself on GKE Autopilot, our official Helm chart will work out of the box.\n",[811,810,9,232],{"slug":976,"featured":6,"template":700},"gitlab-gke-autopilot","content:en-us:blog:gitlab-gke-autopilot.yml","Gitlab Gke Autopilot","en-us/blog/gitlab-gke-autopilot.yml","en-us/blog/gitlab-gke-autopilot",{"_path":982,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":983,"content":988,"config":994,"_id":996,"_type":13,"title":997,"_source":15,"_file":998,"_stem":999,"_extension":18},"/en-us/blog/gitlab-google-cloud-integrations-now-in-public-beta",{"title":984,"description":985,"ogTitle":984,"ogDescription":985,"noIndex":6,"ogImage":908,"ogUrl":986,"ogSiteName":686,"ogType":687,"canonicalUrls":986,"schema":987},"GitLab-Google Cloud integrations now in public beta","The multiple integrations streamline authentication, automate CI/CD, and reduce context switching across GitLab and Google Cloud.","https://about.gitlab.com/blog/gitlab-google-cloud-integrations-now-in-public-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab-Google Cloud integrations now in public beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2024-04-09\",\n      }",{"title":984,"description":985,"authors":989,"heroImage":908,"date":991,"body":992,"category":894,"tags":993},[990],"Jackie Porter","2024-04-09","In 2023, we announced our plan [to integrate GitLab with Google Cloud](https://about.gitlab.com/blog/gitlab-google-partnership-s3c/). This week, at Google Cloud Next '24, we are announcing that our first integrations from that partnership are now in public beta. 
\n\nThese critical integrations streamline authentication, automate CI/CD, and decrease context switching across GitLab and Google Cloud, reducing the friction involved in using the two and improving the overall developer experience by helping them focus on deploying code, and not setting up infrastructure. GitLab users can learn [how to set up the GitLab-Google Cloud integrations](https://docs.gitlab.com/ee/tutorials/set_up_gitlab_google_integration/).\n\n## Streamline authentication\n\nWhen organizations want to use GitLab and Google Cloud together, they typically need to use a service account key to access Google Cloud resources from GitLab. This approach can present an unnecessary security risk and add additional maintenance burden.\n\nWith the new GitLab-Google Cloud integration, GitLab customers can use industry-standard methods: identity and access management ([IAM](https://cloud.google.com/security/products/iam)) and Workload Identity Federation ([WLIF](https://cloud.google.com/iam/docs/workload-identity-federation)) for authentication. This replaces the need for cross-system service accounts, decreasing the risk associated with service account keys, and minimizing management overhead for rotating keys. To learn more about setting up IAM and WLIF, read our [documentation](https://docs.gitlab.com/ee/integration/google_cloud_iam.html).\n\nWe also added a method to streamline authentication from CI/CD pipelines using a developer-minded approach with a new identity keyword. Learn more in the [identity keyword documentation](https://docs.gitlab.com/ee/ci/yaml/#identity).\n\n## Automate CI/CD\n\nA primary objective of the GitLab-Google Cloud partnership is to help organizations deploy applications to Google Cloud faster. 
With this in mind, we have built two mechanisms to support that: runner configuration automation and a library of Google Cloud Services components.\n\nRunners are the backbone of all CI/CD jobs, but installing, managing, and updating them can be time-consuming and inefficient. GitLab offers [runners](https://docs.gitlab.com/ee/ci/runners/) built on infrastructure as code (IaC) best practices, which means we provision and manage runners for you, including deleting them once they’ve done their job. With our runner configuration automation for Google Cloud, our hosted runners are now available to users on Google Cloud, without needing to leave GitLab.\nCheck out our [setup documentation](https://docs.gitlab.com/ee/tutorials/set_up_gitlab_google_integration/#set-up-gitlab-runner-to-execute-your-cicd-jobs-on-google-cloud) to learn more.\n\nWe’ve also worked with Google Cloud to provide a [library of Google components in GitLab’s CI/CD Catalog](https://gitlab.com/google-gitlab-components). These components make it easy to configure your pipelines to deploy to Google Cloud Services, including Google Kubernetes Engine, Artifact Registry, and Cloud Deploy. Rather than search the web for the right YAML configurations, simply browse the CI/CD Catalog within GitLab and import the component configuration into your pipeline’s .yml file.\n\n![gitlab-google image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677557/Blog/Content%20Images/Screenshot_2024-04-09_at_11.43.27_AM.png)\n\n> Learn more about [how to use Google Cloud Components](https://docs.gitlab.com/ee/tutorials/set_up_gitlab_google_integration/#deploy-to-google-cloud-with-cicd-components).\n\n## Reduce context switching\n\nGitLab and Google Cloud together create a single data plane for all your software development needs, from source code management to deployment. 
This means full visibility into your product performance metrics, security and compliance policies, and insights to empower you to optimize your software delivery process – all without having to context switch between multiple systems. For users of Google Cloud and GitLab, this is a game changer.\n\nOur guiding principles throughout this integration plan were developer experience and efficiency. As an example, check out this demo showing how simple it is to integrate GitLab with Google Cloud Artifact Registry.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CcPl3k3IHjM?si=XNfGnK9Qlx7XxD3v\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What’s next?\n\nWe are now in beta and welcome your feedback. To begin using the Google Cloud integrations, follow the steps in this [tutorial](https://docs.gitlab.com/ee/tutorials/set_up_gitlab_google_integration/).",[9,232,894],{"slug":995,"featured":90,"template":700},"gitlab-google-cloud-integrations-now-in-public-beta","content:en-us:blog:gitlab-google-cloud-integrations-now-in-public-beta.yml","Gitlab Google Cloud Integrations Now In Public Beta","en-us/blog/gitlab-google-cloud-integrations-now-in-public-beta.yml","en-us/blog/gitlab-google-cloud-integrations-now-in-public-beta",{"_path":1001,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1002,"content":1007,"config":1012,"_id":1014,"_type":13,"title":1015,"_source":15,"_file":1016,"_stem":1017,"_extension":18},"/en-us/blog/gitlab-google-partnership-s3c",{"title":1003,"description":1004,"ogTitle":1003,"ogDescription":1004,"noIndex":6,"ogImage":886,"ogUrl":1005,"ogSiteName":686,"ogType":687,"canonicalUrls":1005,"schema":1006},"Better together with GitLab and Google Cloud","GitLab’s DevSecOps workflow now integrates with Google Cloud secure Artifact Registry, security scanning, and deployment 
toolchains.","https://about.gitlab.com/blog/gitlab-google-partnership-s3c","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Better together with GitLab and Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2023-08-29\",\n      }",{"title":1003,"description":1004,"authors":1008,"heroImage":886,"date":1009,"body":1010,"category":894,"tags":1011},[990],"2023-08-29","\nToday, we are pleased to announce that Google Cloud and GitLab are partnering to integrate GitLab's unique capabilities with Google Cloud. This partnership will combine GitLab's source code management, planning, CI/CD workflow, advanced security, and compliance capabilities with the unified data plane in Google’s Cloud console and Artifact Registry.\n\nWe continually hear developers are frustrated with the increased complexity and security risk of having multiple point solutions in their DevSecOps toolchain. Our new integration will bring multiple tools together to allow them to be fully managed and cloud-hosted.  The integration relieves operators of the duties typically associated with a self-hosted solution, such as applying patches and upgrades and then testing them to make sure things continue to work as expected. Developers will love that they are able to reduce the number of tools and cognitive load needed to develop and ship software faster, with security included from the start.\n\n> Sign up for the Google Software Supply Chain Security and GitLab DevSecOps [integration waitlist](https://page.gitlab.com/interest-gitlab-and-google-security-solution-contact-request.html).\n\n## Powering the DevSecOps lifecycle with scale and visibility \nGoogle’s Software Supply Chain Security pairs with GitLab’s DevSecOps platform to provide system-wide governance and policy enforcement throughout the software development lifecycle. 
\n\n![Diagram](https://about.gitlab.com/images/blogimages/2023-08-29-gitlab-google-partnership/s3cimage1.png){: .shadow}\n\nThe joint solution replaces a myriad of point solutions that are difficult to manage, maintain, and upgrade. The integration will enable customers to better leverage the benefits of GitLab’s unified DevSecOps workflow with native supply chain security capabilities from Google Cloud. \n\n## Seamless connections for security \nEven before a developer writes any code, they will be able to easily access their GitLab project from the Google Cloud Console. Teams will be able to plan, create issues, and define epics all within GitLab, ensuring security is integrated from the start. \n\nWhen code is ready to be pushed to production, the integration will enable easy registration and configuration of private Google Cloud-powered runners from within GitLab, then utilize CI/CD component templates for deploying to various Google Cloud resources like Google Kubernetes Engine (GKE) and Cloud Run.\n\nOne of the most exciting things for our customers' connected experience will be the ability to use Google’s Artifact Registry with GitLab’s pipelines and packaging to create a security data plane. In this view of the Google Artifact Registry, developers will be able to see a consolidation of security scanning results and the metadata from vulnerability reports in GitLab. A great example of how users will benefit is from having a SLSA-rated provenance telling users where and how software was built, a software bill of materials (SBOM) which provides transparency regarding the content of the software artifacts, and vulnerability impact information gated with Google’s Binary Authorization policies. Outputs from GitLab can be confirmed via attestation and signature such that packages can be prevented from running on a cluster if they do not satisfy the security or verification requirements. 
\n\n![Artifacts](https://about.gitlab.com/images/blogimages/2023-08-29-gitlab-google-partnership/s3cimage2.png){: .shadow}\n\n\"We are excited to expand our partnership with GitLab to provide our customers end-to-end software supply chain security that is easier and more accessible than ever before,” said Gabe Monroy, VP of Developer Experience at Google Cloud. “I am looking forward to more joint innovation with GitLab in the DevSecOps space with the goal of helping our customers deliver software more rapidly and with greater confidence.\"\n\n## Join our early access program \nWe are excited about how this collaboration will help Google Cloud and GitLab customers ship better, more secure, software faster. To join our early access program, sign up for the [waitlist](https://page.gitlab.com/interest-gitlab-and-google-security-solution-contact-request.html)! \n\n",[894,9,232],{"slug":1013,"featured":6,"template":700},"gitlab-google-partnership-s3c","content:en-us:blog:gitlab-google-partnership-s3c.yml","Gitlab Google Partnership S3c","en-us/blog/gitlab-google-partnership-s3c.yml","en-us/blog/gitlab-google-partnership-s3c",{"_path":1019,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1020,"content":1025,"config":1031,"_id":1033,"_type":13,"title":1034,"_source":15,"_file":1035,"_stem":1036,"_extension":18},"/en-us/blog/gitlab-journey-from-azure-to-gcp",{"title":1021,"description":1022,"ogTitle":1021,"ogDescription":1022,"noIndex":6,"ogImage":801,"ogUrl":1023,"ogSiteName":686,"ogType":687,"canonicalUrls":1023,"schema":1024},"GitLab’s journey from Azure to GCP","GitLab Staff Engineer Andrew Newdigate shares how we completed our migration to Google Cloud Platform, and how we overcame challenges along the way.","https://about.gitlab.com/blog/gitlab-journey-from-azure-to-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s journey from Azure to GCP\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-02\",\n      }",{"title":1021,"description":1022,"authors":1026,"heroImage":801,"date":1028,"body":1029,"category":300,"tags":1030},[1027],"Chrissie Buchanan","2019-05-02","\n\nLast June, we had to face the facts: Our SaaS infrastructure for GitLab.com was not ready for mission-critical workloads, error rates were just too high, and availability was too low. To address these challenges, we decided to migrate from Azure to Google Cloud Platform (GCP) and document the journey publicly, end to end. A lot has happened since [we first talked about moving to GCP](/blog/moving-to-gcp/), and we’re excited to share the results.\n\nAt [Google Cloud Next '19](https://cloud.withgoogle.com/next/sf), GitLab Staff Engineer [Andrew Newdigate](/company/team/#suprememoocow) presented our migration experience and the steps we took to make it happen. Migrations seldom go as planned but we hope that others can learn from the process. Check out the video to learn more about our journey from Azure to GCP, and find some of our key takeaways below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ve_9mbJHPXQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThere were several reasons why we decided on the Google Cloud Platform. One top priority was that we wanted GitLab.com to be suitable for mission-critical workloads, and GCP offered the performance and consistency we needed. A second reason is that we believe [Kubernetes](/solutions/kubernetes/) is the future, especially with so much development geared toward [cloud native](/topics/cloud-native/). Another priority was price. 
For all of these reasons and more, Google was the clear choice as a partner going forward.\n\nOur company values are important to us and we apply them to all aspects of our work and our migration from Azure to GCP is no exception.\n\n## Three core values guided this project:\n\n###  Efficiency\n\nAt GitLab, [we love boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions). The goal of the project was really simple: Move GitLab.com to GCP. We wanted to find the least complex and most straightforward solution to achieve this goal.\n\n### Iteration\n\nWe focus on shipping the [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) and work in steps. When we practice iteration, we get feedback faster, we’re able to course-correct, and we reduce cycle times.\n\n### Transparency\n\nWe work [publicly by default](https://handbook.gitlab.com/handbook/values/#public-by-default), which is why we made [this project accessible to everyone](https://gitlab.com/gitlab-com/migration/) and [documented our progress](https://docs.google.com/document/d/1p3Brri44_SKyakViKB-LGWCmCcwILW6z2A8a8eWFyFc/edit?usp=sharing) along the way.\n\n## How we did it\n\nLooking for the simplest solution, we considered whether we could just stop the whole site: Copy all the data from Azure to GCP, switch the DNS over to point to GCP, and then start everything up again. The problem was that we had too much data to do this within a reasonable time frame. Once we shut down the site, we'd need to copy all the data between two cloud providers, and once the copy was complete, we'd need to verify all the data (about half a petabyte) and make sure it was correct. 
This plan meant that GitLab.com could be down for _several days_, and considering that thousands and thousands of people rely on GitLab on a daily basis, this wouldn’t work.\n\n![GitLab Geo diagram](https://about.gitlab.com/images/gitlab_ee/gitlab_geo_diagram_migrate.png){: .medium.center}\n\nWe went back to the drawing board. We were working on another feature called [Geo](https://docs.gitlab.com/ee/administration/geo/index.html) which allows for full, read-only mirrors of GitLab instances. Besides browsing the GitLab UI, Geo instances can be used for cloning and fetching projects as well as for a planned failover to migrate GitLab instances.\n\nWe hoped that by taking advantage of the replication capabilities we were building for Geo, we could migrate the entire GitLab.com site to a secondary instance in GCP. The process might have taken weeks or months, but thankfully the site would be available throughout the synchronization process. Once all the data was synchronized to GCP, we could verify it and make sure it was correct. Finally, we could just promote the GCP environment and make it our new primary.\n\nThis new plan had many advantages over the first one. Obviously, GitLab.com would be up during the synchronization and we would only have a short period of downtime, maybe an hour or two, rather than weeks. We could do full QA, load testing, and verify all data before the failover.\n\n>\"If it could work for us on GitLab.com, it would pretty much work for any other customer who wanted to use Geo. We could be confident in that.\" - Andrew Newdigate, Infrastructure Architect at GitLab\n\n![Helm charts](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/helm_charts.png){: .medium.center}\n\nWe were also working on another major project to install and run GitLab on Kubernetes. 
Much like Omnibus is a package installer for installing GitLab _outside_ a Kubernetes environment, GitLab’s helm charts [install GitLab inside a Kubernetes environment](https://docs.gitlab.com/charts/). The plan evolved to use helm charts to install GitLab in GCP while still using Geo for replication.\n\nIt became apparent there were problems with this approach as we went along:\n\n*   The changes we needed to make to the application to allow it to become fully cloud native were extensive and required major work.\n*   The timeframes of the GCP migration and cloud native projects wouldn’t allow us to carry them out simultaneously.\n\nWe ultimately decided it would be better to postpone the move to Kubernetes until after migration to GCP.\n\nWe went to the next iteration and decided to use Omnibus to provision the new environment. We also migrated all file artifacts, including CI Artifacts, Traces (CI log files), file attachments, LFS objects and other file uploads to [Google Cloud Storage](https://cloud.google.com/storage/) (GCS), moving about 200TB of data off our Azure-based file servers into GCS. Doing this reduced the risk and the scale of the Geo migration.\n\nThe steps for the migration were now fairly straightforward:\n\n*   Set up a Geo secondary in GCP.\n*   Provision the new environment with Omnibus.\n*   Replicate all the data from GitLab.com in Azure to GCP.\n*   Test the new environment and verify all the data is correct.\n*   Failover to the GCP environment and promote it to primary.\n\nThere was only one major unknown left in this plan: The actual failover operation itself.\n\nUnfortunately, **Geo didn’t support a failover operation**, and nobody knew exactly how to do it. 
It was essential that we executed this perfectly, so we used our value of iteration to get it right.\n\n![GitLab failover procedure issue template](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/issue_template.png){: .medium.center}\n\n*   We set up the failover procedure as an issue template in the GitLab migration issue tracker with each step as a checklist item.\n*   Every time we practiced, we created a new issue from the template and followed the checklist step by step.\n*   After each failover, we would review and consider how we could improve the process.\n*   We would submit these changes as merge requests to the issue template.\n\nThe merge requests were thoroughly reviewed before being approved by the team and through this very tight, iterative feedback loop, the checklist grew to cover every possible scenario we experienced. In the beginning, things almost never went according to plan, but with each iteration, we got better. In the end, there were _over 140 changes_ in that document before we felt confident enough to move forward with the failover. We let Google know and an amazing team was assembled to help us. The failover went smoothly and we didn't experience any major problems.\n\n## Results\n\nGoing back to the goals of the project: Did we make GitLab.com suitable for mission-critical workloads? Firstly, let's consider availability on GitLab.com.\n\n![GitLab Pingdom chart](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/errors_per_day.png){: .shadow.medium.center}\n\nThis [Pingdom](https://www.pingdom.com/) graph shows the number of errors we saw per day, first in Azure and then in GCP. 
The average for the pre-migration period was 8.2 errors per day, while post-migration it’s down to **just one error a day**.\n\n![GitLab availability](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/gitlab_availability.png){: .shadow.medium.center}\n\nLeading up to the migration, our availability was 99.61 percent. [In our October update](/blog/gitlab-com-stability-post-gcp-migration/) we were at 99.88 percent. As of April 2019, we've improved to **99.93 percent** and are on track to reach our target of 99.95 percent availability.\n\n![GitLab latency chart](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/latency.png){: .shadow.medium.center}\n\nThis latency histogram compares the site performance of GitLab.com before and after moving to GCP. We took data for one week before the migration and one week after the migration. The GCP line shows us that the latencies in GCP drop off quicker, which means GitLab.com is not only faster, it’s more predictable, with fewer outlier values taking an unacceptably long time.\n\n[GitLab users have also noticed the increased stability](https://www.reddit.com/r/gitlab/comments/9f71nq/thanks_gitlab_team_for_improving_the_stability_of/), which is an encouraging sign that we've taken steps in the right direction.\n\nIt's important to note that these improvements can't be attributed to the migration alone – we explore some other contributing factors in [our October update](/blog/gitlab-com-stability-post-gcp-migration/).\n\n\n## What we learned\n\n* Having this amount of visibility into a large-scale migration project is pretty unusual, but it gave us an opportunity to put our values to the test. By opening our documentation to the world, we can collaborate and help others on their own migration journey.\n*  Working by our values gave us the ability to get the quick feedback we needed. 
Even though we weren’t able to use GitLab on Kubernetes during the migration, we course-corrected and came up with the right solutions.\n* We were able to see exactly how Google developers work and got an up-close look into how one of the fastest-moving companies in the world actually manages its [DevOps lifecycle](/topics/devops/). This knowledge will have a long-term impact on GitLab and how we support these organizations in the future.\n\nIf you would like to learn more about how we migrated to GCP, feel free to take a look at the **[issue tracker](https://gitlab.com/gitlab-com/migration/)** and our **[project documentation](http://bit.ly/2UrlU4s)**.\n",[9,789,810,811],{"slug":1032,"featured":6,"template":700},"gitlab-journey-from-azure-to-gcp","content:en-us:blog:gitlab-journey-from-azure-to-gcp.yml","Gitlab Journey From Azure To Gcp","en-us/blog/gitlab-journey-from-azure-to-gcp.yml","en-us/blog/gitlab-journey-from-azure-to-gcp",{"_path":1038,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1039,"content":1044,"config":1049,"_id":1051,"_type":13,"title":1052,"_source":15,"_file":1053,"_stem":1054,"_extension":18},"/en-us/blog/gitlab-pages-update",{"title":1040,"description":1041,"ogTitle":1040,"ogDescription":1041,"noIndex":6,"ogImage":801,"ogUrl":1042,"ogSiteName":686,"ogType":687,"canonicalUrls":1042,"schema":1043},"Update about GitLab Pages","If you are using GitLab Pages with a custom domain, you may need to update your DNS.","https://about.gitlab.com/blog/gitlab-pages-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update about GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2018-08-28\",\n      }",{"title":1040,"description":1041,"authors":1045,"heroImage":801,"date":1046,"body":1047,"category":695,"tags":1048},[806],"2018-08-28","\n\nAfter completing our move to Google Cloud Platform (GCP) 
on August 11, 2018, GitLab.com traffic has been served from our new infrastructure in GCP. For GitLab Pages users, we left a proxy in place in Azure to be backwards compatible for those Pages users who had an A record pointing to the IP Address at our Azure location.\n\nWe had planned a graceful window to let people have time to migrate their DNS records.  In our [July GCP move update](/blog/gcp-move-update/), we referenced the new IP address at GCP that people should use.\n\nIn that transition, users should have moved their DNS records from 52.167.214.135 to 35.185.44.232.\n\nThis week, we started cleanup of parts of our now legacy Azure infrastructure. Unfortunately, that cleanup also caught up the Azure load balancer that had the old 52.167.214.135 IP address for the GitLab pages proxy. We quickly filed a ticket to see if we could reclaim the IP address, but could not be guaranteed that we could get it back when we rebuilt the load balancer. This post is to get the information out for those Pages users who have been affected by this change.\n\n### What you need to know:\n\nIf you are using GitLab Pages with a custom domain AND you have an A record in DNS that points to the old Azure IP, you will need to update your DNS:\n\n|from IP (old)|to IP (new)|\n|",[9,789,810,811],{"slug":1050,"featured":6,"template":700},"gitlab-pages-update","content:en-us:blog:gitlab-pages-update.yml","Gitlab Pages Update","en-us/blog/gitlab-pages-update.yml","en-us/blog/gitlab-pages-update",{"_path":1056,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1057,"content":1062,"config":1068,"_id":1070,"_type":13,"title":1071,"_source":15,"_file":1072,"_stem":1073,"_extension":18},"/en-us/blog/gke-gitlab-integration",{"title":1058,"description":1059,"ogTitle":1058,"ogDescription":1059,"noIndex":6,"ogImage":801,"ogUrl":1060,"ogSiteName":686,"ogType":687,"canonicalUrls":1060,"schema":1061},"GitLab + Google Cloud Platform = simplified, scalable deployment","We’ve teamed up with Google Cloud 
Platform – here’s what that means for you.","https://about.gitlab.com/blog/gke-gitlab-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab + Google Cloud Platform = simplified, scalable deployment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-04-05\",\n      }",{"title":1058,"description":1059,"authors":1063,"heroImage":801,"date":1065,"body":1066,"category":300,"tags":1067},[1064],"Rebecca Dodd","2018-04-05","\n\nGet super-simple deployment for your app with GitLab and Google Cloud Platform (GCP): thanks to our integration with Google Kubernetes Engine (GKE), you can now get CI/CD and Kubernetes deployment set up with just a few clicks, and [$500 credit](#get-seamless-integration-with-gke-and-500-credit-for-your-project) to get you started.\n\n## Now everyone can get automatic code quality, security testing, and no-configuration deployment\n\nWith increasing adoption of [cloud native](/topics/cloud-native/) practices, the use of [microservices](/topics/microservices/) and containers has become critical to modern software development. Kubernetes has emerged as the first choice for container orchestration, allowing apps to scale elastically from a couple of users to millions. It's been possible to deploy to Kubernetes from GitLab for quite a while, but the process of setting up and managing everything was manual and time intensive.\n\nToday, we’re happy to announce we've been collaborating with Google to make Kubernetes easy to set up on GitLab. Now, with our native [Google Kubernetes Engine integration](/partners/technology-partners/google-cloud-platform/), you can automatically spin up a cluster to deploy applications, with just a few clicks. Simply connect your Google account, enter a few details, and you're good to go! GitLab will create the clusters for you. 
The clusters are fully managed by Google and run on Google Cloud Platform's best-in-class infrastructure.\n\nThis also means you can easily take advantage of GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). This feature does all the hard work for you, by automatically configuring CI/CD pipelines to build, test, and deploy your application. To make use of Auto DevOps, it used to be necessary to have an in-depth understanding of Kubernetes, and you had to manage your own clusters. Not any more!\n\nWith the integration between GitLab and GKE, we’ve made it simple to set up a managed deployment environment on Google Cloud Platform and access our robust [DevOps capabilities](/topics/devops/). That’s all the benefits of fully automated code quality, security testing, and deployment, with none of the headache of managing and updating your clusters (Google does that all for you!). More than half of developers and 78 percent of managers in our [2018 Global Developer Report](/developer-survey/) agreed that automating more of the software development lifecycle is a top priority for their organization. We hope that this integration gives you a head start, by offering automation out of the box with Kubernetes and Auto DevOps.\n\n## What’s next for GitLab?\n\nWe’re not just excited about offering this integration for you to use, we’re excited to use it ourselves! We’re already in the process of migrating GitLab.com to Google Cloud Platform. For us, the primary reason to migrate was because it has the most mature Kubernetes platform. By moving, we get access to security functionality like default encrypted data at rest, a broad, ever-expanding list of localities served globally, and tight integration with our existing CDN for faster caching. 
Be on the lookout for more information on our migration as it progresses.\n\n## Get seamless integration with GKE and $500 credit for your project\n\nEvery new Google Cloud Platform account receives $300 in credit [upon signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, GitLab is able to offer an additional $200 for new GCP accounts to get started with GitLab’s GKE integration. Here's a link to [apply for your $200 credit](https://cloud.google.com/partners/partnercredit/?pcn_code=0014M00001h35gDQAQ#contact-form).\n\n## Join Google and GitLab for a live demo\n\nOn April 26th, join Google’s [William Denniss](https://www.linkedin.com/in/williamdenniss/) and GitLab’s [William Chia](https://www.linkedin.com/in/williamchia/) for a walkthrough of the new GKE integration. You’ll learn how easy it is to set up a Kubernetes cluster, how to deploy your app using GitLab CI/CD, and how GKE enables you to deploy, update, and manage containerized applications at scale.\n\n[Register today](/webcast/scalable-app-deploy/)!\n",[810,9,232,811,854],{"slug":1069,"featured":6,"template":700},"gke-gitlab-integration","content:en-us:blog:gke-gitlab-integration.yml","Gke Gitlab Integration","en-us/blog/gke-gitlab-integration.yml","en-us/blog/gke-gitlab-integration",{"_path":1075,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1076,"content":1081,"config":1088,"_id":1090,"_type":13,"title":1091,"_source":15,"_file":1092,"_stem":1093,"_extension":18},"/en-us/blog/gke-webcast-recap-post",{"title":1077,"description":1078,"ogTitle":1077,"ogDescription":1078,"noIndex":6,"ogImage":801,"ogUrl":1079,"ogSiteName":686,"ogType":687,"canonicalUrls":1079,"schema":1080},"Scalable app deployment with GitLab and Google Cloud Platform","Get the power to spin up a Kubernetes cluster managed by Google Cloud Platform in a few clicks – watch the demo of our native 
integration.","https://about.gitlab.com/blog/gke-webcast-recap-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scalable app deployment with GitLab and Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-05-10\",\n      }",{"title":1077,"description":1078,"authors":1082,"heroImage":801,"date":1084,"body":1085,"category":695,"tags":1086},[1083],"Suri Patel","2018-05-10","\n\nThe GitLab + Google Kubernetes Engine integration's versatility speeds up software development and delivery while maintaining security and scale, allowing developers to focus on building apps instead of managing infrastructure. William Chia, Senior Product Marketing Manager at GitLab, and guest speaker William Denniss, Product Manager at Google, recently met to discuss the benefits of the integration.\n\n- [What is the GitLab GKE integration?](#what-is-the-gitlab-gke-integration)\n- [What's in the webcast?](#whats-in-the-webcast)\n- [Watch the recording](#watch-the-recording)\n- [Key takeaways](#key-takeaways)\n- [Webcast Q&A](#webcast-qa)\n\n## What is the GitLab GKE integration?\n\nWith our native Google Kubernetes Engine integration, you can automatically spin up a cluster to deploy applications, with just a few clicks. Simply connect your Google account, enter a few details, and GitLab will create the clusters for you. 
The clusters are fully managed by Google and run on Google Cloud Platform’s best-in-class infrastructure.\n\n## What's in the webcast\n\nWilliam Chia, Senior Product Marketing Manager at GitLab, and William Denniss, Product Manager at Google, explain how to deploy applications at scale using GKE and GitLab’s robust Auto DevOps capabilities.\n\nWe start with a crash course in Kubernetes, examining containers and deployment, before taking a closer look at the [Google Kubernetes Engine integration](/partners/technology-partners/google-cloud-platform/) and seeing it in action.\n\n## Watch the recording\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uWC2QKv15mk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n#### A seamless collaboration\n\n>Using GitLab with GKE creates an environment in which you just need to merge your code, and GitLab does all the rest. - William Chia, GitLab Senior Product Marketing Manager\n\n#### Kubernetes for success\n\n>If you go with Kubernetes, it gives you a good start. You can hit a button and configure GKE to do it for you and scale massively when you need to. It really sets you up for success. GitLab is a really great way to get started with Kubernetes, because it sets up everything nicely for you in an automated way. - William Denniss, Google Product Manager\n\n## Webcast Q&A\n\nDuring the webcast, live participants chatted in questions to the team. Here are some of the answers that were given via chat along with several questions we didn’t get a chance to answer during the webcast.\n\n>Does Kubernetes have a built-in load balancer?\n\nIt does have support for load balancing across pods within a service. You may also need an external load balancer, in the event you have multiple nodes. 
Creating a [Kubernetes Service object](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster) and an [external load balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer) are great first steps.\n\n>Is it possible to deploy multiple projects in the same Kubernetes cluster?\n\nIt is, you can add the cluster manually to additional projects. We are also working to make this easier in our UI, with [support for defining clusters at the group level](https://gitlab.com/gitlab-org/gitlab-ce/issues/34758).\n\n>So coming back to the setup of a cluster. If you have a separate environment for development, test, acceptance, and production, it seems we would have multiple options, like multiple clusters, or one cluster with multiple environments. Or even one cluster, one environment and point the correct environment in the `.gitlab-ci.yml` file (environment page in GitLab). What do you recommend to use to have a nice CI/CD integration and still separate environments?\n\nWe support integrating multiple clusters into a single project, and you can define which environments should be deployed to which clusters by [using the environment scope](https://docs.gitlab.com/ee/user/project/clusters/#setting-the-environment-scope).\n\n>Is it possible to add several clusters to the same project? To isolate environments based on clusters rather than namespaces.\n\nYes, this is a feature of GitLab Premium/Silver. (Note: Open source projects on GitLab.com get all of the features of our top-tier plan for free. Public projects on GitLab.com also have this capability.)\n\n>Does GitLab support on-demand cluster creation for integration testing for QA environments?\n\nWe support the integration of multiple clusters, and you can define which cluster each environment should be deployed to. For example, you can state that all review apps should be deployed into one cluster. 
If you would like to dynamically create a cluster during a test, you of course can do that as well by scripting that in a job.\n\n>Are these features available on GitLab CE?\n\nCluster integration and the main Auto DevOps functionality are available in Core (CE or EE without a license). Some jobs do require Premium, and they are noted in our [Auto DevOps documentation](https://docs.gitlab.com/ee/topics/autodevops/#stages-of-auto-devops).\n\n>The test stages are paid features, right?\n\nMany test jobs are open source features available in Core, and indeed some do require a paid license. The requirements for each job are noted in our [Auto DevOps documentation](https://docs.gitlab.com/ee/topics/autodevops/#stages-of-auto-devops).\n\n>What did you mean: “You can run Enterprise Edition without a license?”\n\nGitLab Enterprise Edition uses a license key to grant you access to the features of the Starter, Premium, and Ultimate plans. If you install Enterprise Edition and don’t have a license key, then you will get access to all of the Core features.\n\n[Learn more about GitLab's tiers](/blog/gitlab-tiers/).\n\n[Learn if you should use Community Edition or Enterprise Edition](/install/ce-or-ee/).\n\n>Is there a free version of GKE for testing and learning?\n\nEvery new Google Cloud Platform account receives $300 in credit upon [signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, GitLab is able to offer an additional $200 for new GCP accounts to get started with GitLab’s GKE Integration. This allows you ample usage to test and learn for free.  Visit the Google partner credit page to apply for the $200 additional credit.\n\n>I see there is a $200 credit for playing around with GitLab and GKE. Can you elaborate on that? How to receive it, etc... Is it available for personal use or for professional use only? 
A contact form opens that wants my professional email address.\n\nThe $200 partner credit is intended for professional use. You can apply by visiting the Google Cloud Platform [partner page](https://cloud.google.com/partners/partnercredit/?PCN=a0n60000006Vpz4AAC) and filling out the form. You'll receive an email from the Google team with a key to redeem your credit.\n\n>Will Prometheus also gather the metrics without Auto DevOps, for example our own `.gitlab-ci.yml`? Or do we need to get something from the DevOps template?\n\nWe detect common system services like the NGINX Ingress or Kubernetes CPU/Memory metrics. If you use the NGINX Ingress deployed from GitLab, it is automatically configured for exporting Prometheus metrics. Additional documentation is available in our [Prometheus documentation](https://docs.gitlab.com/ee/user/project/integrations/prometheus_library/nginx_ingress.html).\n\n>Will you also support AWS?\n\nOther providers are certainly items we are considering for future releases, but we started with GKE since we felt it has the best managed Kubernetes experience available today. Other clusters can always be added manually, with just a few extra steps.\n\n>What if GitLab is running on GKE itself, can you connect the app to the same Kubernetes cluster GitLab is running on? And how safe is it to run this auto-deployment on your existing Kubernetes clusters/cluster GitLab is running on? Looks as if you could easily waste your cluster with this.\n\nIf you’re running GitLab on GKE, you can definitely connect it to the same cluster GitLab is running on to execute your GitLab runners, and as the deployment target for Auto DevOps. I’d advise to use separate namespaces for your GitLab instance to avoid any interference.\n\nNamespaces are the key to achieving workload isolation in Kubernetes; they provide isolation between different deployments to avoid one accidentally influencing the other. 
If you like (and it’s a bit more configuration), you can even use RBAC to prevent any developer pipelines from ever touching production.\n\nIf you want total isolation, then create a separate GCP project, with a separate cluster for production :) This is definitely the best practice for larger deployments.\n\n>I have been playing around with the `dependency_scanning`/`sast`/`dast` jobs, but the images are not cached on the runner. Will they be cached in (near) future or do we need to add any configuration?\n\nWe use Docker-in-Docker for most of these jobs, so caching is a bit tricky, and we have an [issue tracking this](https://gitlab.com/gitlab-org/gitlab-ce/issues/17861).\n\n>What does GitLab use to create the container image?\n\nAuto DevOps uses Herokuish and Heroku buildpacks to automatically detect and build the application into a Docker image. If you add a Dockerfile to your repo, GitLab will use docker build to create a Docker image.\n\n>Does the GKE/Kubernetes integration require the GitLab installation to be publicly accessible from the internet? Or will it work just as well if the GitLab server is private?\n\nIt does not, but if you deploy a runner to the cluster it will need to be able to access the GitLab server to pick up jobs and do its Git clones.\n\n>How does one manage two different `.env` files for different environments with GitLab CI?\n\nIf you define environment variables at the project level, you can specify which ones are available for which environments by following the [documentation on limiting environment scopes](https://docs.gitlab.com/ee/ci/variables/#limiting-environment-scopes-of-secret-variables).\n\n>What do I do when I receive this error: “We could not verify that one of your projects on GCP has billing enabled. 
Please try again.”\n\nPlease read the second bullet on the [GCP billing on the documentation page](https://docs.gitlab.com/ee/user/project/clusters/#adding-and-creating-a-new-gke-cluster-via-gitlab), which should help ensure that billing is set up for your account.\n\n>Is there a setting to control the number of review apps which are running live at any given time? Worried about cost.\n\nNote that review apps only run on open Merge Requests. If you are using the Auto DevOps template, then once the code is merged, or the MR is closed, the review app shuts down. Today, there’s not a feature to limit the number of review apps, but there are a few options. Review app environments can be manually stopped from both the MR and the environments page. You can also disable review apps altogether.\n\n>What are requirements for installing the one-click applications to the cluster?\n\nHelm Tiller, Ingress, Prometheus, and GitLab Runner don't have any special requirements to install via one-click. The integration takes care to ensure the appropriate container images are used and everything is configured properly. The only prerequisite is to install Helm Tiller first (since it is used to install the other applications.) If you install these applications manually to your cluster, you can learn about the requirements for each on their respective documentation pages.\n\n>Does this replace solutions like Rancher?\n\nIn a nutshell, yes, the GitLab GKE integration provisions and manages clusters on GKE, alleviating the need for Rancher. But this also depends on your needs. You can use GitLab with or without Rancher. 
For example, if you are using AKS or EKS, then Rancher will provision and manage your cluster automatically, while this requires manual configuration on GitLab.\n\n>What is the current state of installing GitLab on Kubernetes?\n\nGitLab has two Helm charts for installing GitLab on Kubernetes – the GitLab-Omnibus chart and the cloud native GitLab chart.\n\nGitLab-Omnibus: The best way to run GitLab on Kubernetes today, suited for small deployments. The chart is in beta and will be deprecated by the cloud native GitLab chart.\nCloud native GitLab chart: The next generation GitLab chart, currently in alpha. Will support large deployments with horizontal scaling of individual GitLab components. For more information, please visit [the GitLab Helm chart documentation page](https://docs.gitlab.com/charts/).\n\n>How usable is the new Helm chart for GitLab on Kubernetes?\n\nIt is in alpha, and we plan to have a beta available in May/June. We created [an issue](https://gitlab.com/groups/charts/-/epics/17) to note the items we are working to address before beta.\n\n>How can I enable Auto DevOps if I have `gitlab-ci.yml` file already, but for only build and test?\n\nAuto DevOps will use your custom `gitlab-ci.yml` file if it is present in your repo. If there is no file, then Auto DevOps will use the default Auto DevOps template. You can also see the [Auto DevOps template `gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-ci-yml/blob/master/Auto-DevOps.gitlab-ci.yml) and use it as a reference to add/update your `gitlab-ci.yml`. For more information, please visit [the customizing `.gitlab-ci.yml` documentation page](https://docs.gitlab.com/ee/topics/autodevops/#customizing-gitlab-ci-yml).\n\nHave you tried the GitLab + GKE integration? 
Tweet us [@gitlab](https://twitter.com/gitlab).\n",[810,9,811,1087,874],"webcast",{"slug":1089,"featured":6,"template":700},"gke-webcast-recap-post","content:en-us:blog:gke-webcast-recap-post.yml","Gke Webcast Recap Post","en-us/blog/gke-webcast-recap-post.yml","en-us/blog/gke-webcast-recap-post",{"_path":1095,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1096,"content":1102,"config":1109,"_id":1111,"_type":13,"title":1112,"_source":15,"_file":1113,"_stem":1114,"_extension":18},"/en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab",{"title":1097,"description":1098,"ogTitle":1097,"ogDescription":1098,"noIndex":6,"ogImage":1099,"ogUrl":1100,"ogSiteName":686,"ogType":687,"canonicalUrls":1100,"schema":1101},"Google Cloud integrations for secure Cloud Run deployments at GitLab","This tutorial demonstrates how to use GitLab’s Google Artifact Management integration to deploy to Google Cloud Run, a serverless runtime for containers application.\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099336/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750099336757.png","https://about.gitlab.com/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Cloud integrations for secure Cloud Run deployments at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"},{\"@type\":\"Person\",\"name\":\"Matt Genelin\"}],\n        \"datePublished\": \"2025-01-15\",\n      }",{"title":1097,"description":1098,"authors":1103,"heroImage":1099,"date":1105,"body":1106,"category":1107,"tags":1108},[763,1104],"Matt Genelin","2025-01-15","*This tutorial is from a recent Arctiq, GitLab, and Google in-person\nworkshop. 
The goal was to explore common security challenges faced by\norganizations as they journey to the cloud.*\n\n\nThis tutorial will help you learn about the [Google Cloud integrations in\nGitLab](https://cloud.google.com/docs/gitlab). These features are meant to\nhelp accelerate and improve security of deployments to Google Cloud.\n\n\n![Google integrations\nlist](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099345/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099345112.png)\n\n\n## Prerequisites\n\n\n1. [Google Cloud\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects)  \n\n2. Appropriate [IAM permissions](https://cloud.google.com/iam/docs/) for\nsecurity, Artifact Registry, and Cloud Run usage. For this tutorial, ensure\nyou have the \"Owner\" role with the aforementioned project.\n\n\n## Setting up Workload Identity Federation\n\n\nIn this step, we configure GitLab to connect Google Cloud's Workload\nIdentity Federation to reduce the need for service accounts and let the two\nplatforms use short-lived credentials on-demand.\n\n\n1. On the left sidebar, select **Search** or go to and find your group or\nproject. If you configure this in a group, settings apply to all projects\nwithin by default.  \n\n2. Select **Settings \\> Integrations**.  \n\n3. Select **Google Cloud IAM**.  \n\n4. Input the Project ID and Project number in the respective fields. This\ninformation can be obtained from the Google Cloud console\n[Welcome](https://console.cloud.google.com/welcome) page of your project.  \n\n5. Input the desired Pool ID and Provider ID in the respective fields. These\nare values that you provide and must be unique from other Pool and Provider\nIDs.  \n\n6. Copy the generated command and then go to the **Google Cloud console**.  \n\n7. Run **Cloud Shell** and execute the generated command from the Workload\nIdentity Federation integration page.  \n\n8. 
Once successful, the **Google Cloud IAM** integration will be designated\nas active in the Integrations list at the GitLab project.\n\n\n## Artifact Registry configuration\n\n\nAs an alternative to GitLab's own place to host artifacts, deploying to\nGoogle Cloud's Artifact Registry is another way to leverage their\ninfrastructure. This section will provide steps on how to use GitLab's\nnative integration with Artifact Registry. Note that Workload Identity\nFederation must already be configured prior to this.\n\n\n1. At the **Google Cloud** console, go to **Artifact Registry** via search\nor the main navigation.  \n\n2. Create a new repository by clicking the **\"+\"** icon. At the creation\npage, provide a name and keep the **Docker** format and **Standard** mode\nselected. Select **Region** and choose **us-central1**. Leave the rest at\nthe default settings and click **Create**.  \n\n3. Once the repository is created and confirmed, go back to your GitLab\nproject.  \n\n4. In your GitLab project, on the left sidebar, select **Settings >\nIntegrations**. Then select **Google Artifact Registry**.  \n\n5. Under Enable integration, select the **Active** checkbox, then complete\nthe fields:  \n   * Google Cloud project ID: The ID of the Google Cloud project where your Artifact Registry repository is located.  \n   * Repository name: The name of your Artifact Registry repository.  \n   * Repository location: The location of your Artifact Registry repository. (`us-central1` is assumed.)  \n6. In **Configure Google Cloud IAM policies**, follow the onscreen\ninstructions to set up the IAM policies in Google Cloud. These policies are\nrequired to use the Artifact Registry repository in your GitLab project.\nSelect **Save** changes.  \n\n7. To view your Google Cloud artifacts, on the left sidebar, select **Deploy\n> Google Artifact Registry**.\n\n\n## Cloud Run configuration\n\n\n1. Enable the Cloud Run API, if not done already. 
Go to **APIs & Services >\nEnabled APIs & Services**. From there, click **Enable APIs & Services** at\nthe top and search for **Cloud Run Admin API**. Select the search result and\nenable the API.  \n\n2. Configure the IAM policies in Google Cloud to grant permissions to allow\nthe Cloud Run CI/CD component to deploy to Cloud Run.\n\n\n```\n\nGCP_PROJECT_ID=\"\u003CPROJECT ID>\"\n\nGCP_PROJECT_NUMBER=\"\u003CPROJECT NUMBER>\"\n\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003CPOOL ID>\"\n\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n\n## Deploy to Cloud Run\n\n\nIn this section, you will use Gitlab's CI/CD components to deploy to Cloud\nRun, Google Cloud's serverless runtime for containers.\n\n\n1. Go to the GitLab project and from the list of files in the source code,\nfind `.gitlab-ci.yaml`. Click the **file name** and the single file editor\nwill show up. Click the **Edit** button and select the **Open in Web IDE**\noption.  \n\n2. In Web IDE, copy-paste the following code:\n\n\n```\n\nstages:\n    - build\n    - upload\n    - deploy\n```\n\n\nThis code snippet sets up three stages in the pipeline: build, upload, and\ndeploy.\n\n\n1. 
The next step is to create two CI/CD variables in the same YAML file:\n\n\n```\n\nvariables:\n    GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n    AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n```\n\n\nThe first variable, `GITLAB\\_IMAGE`, denotes the container image that the\npipeline creates by default. The second one, `AR\\_IMAGE`, denotes the\nlocation at Google Cloud's Artifact Registry where the container image will\nbe pushed to.\n\n\n2. Next, define the code that will build the container image:\n\n\n```\n\nbuild:\n    image: docker:24.0.5\n    stage: build\n    services:\n        - docker:24.0.5-dind\n    before_script:\n        - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    script:\n        - docker build -t $GITLAB_IMAGE .\n        - docker push $GITLAB_IMAGE\n```\n\n\nThis code uses [pre-defined CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nfor the Docker commands.\n\n\n3. The final step is using two CI/CD components to deploy to Google Cloud.\nThe first component integrates with Artifact Registry and the second is the\ndeployment to Cloud Run:\n\n\n```\n\ninclude:\n    - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n      inputs:\n        stage: upload\n        source: $GITLAB_IMAGE\n        target: $AR_IMAGE\n\n    - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n      inputs:\n        stage: deploy\n        project_id: \"\u003CPROJECT_ID>\"\n        service: \"tanuki-racing\"\n        region: \"\u003CREGION>\"\n        image: $AR_IMAGE\n```\n\n\nReplace \u003CPROJECT_ID> with your Google Cloud Project ID. Replace with the\n[Google Cloud region](https://cloud.google.com/compute/docs/regions-zones)\nmost appropriate to your location. 
`us-central1` is assumed.\n\n\nCommit the changes and push to the main branch. For reference, the final\n`.gitlab-ci.yaml` should look like this, noting to replace the \u003CPROJECT ID>\nand \u003CREGION> with the appropriate values:\n\n\n```\n\nstages:\n    - build\n    - upload\n    - deploy\nvariables:\n    GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n    AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n    image: docker:24.0.5\n    stage: build\n    services:\n        - docker:24.0.5-dind\n    before_script:\n        - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    script:\n        - docker build -t $GITLAB_IMAGE .\n        - docker push $GITLAB_IMAGE\n\ninclude:\n    - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n      inputs:\n        stage: upload\n        source: $GITLAB_IMAGE\n        target: $AR_IMAGE\n\n    - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n      inputs:\n        stage: deploy\n        project_id: \"\u003CPROJECT_ID>\"\n        service: \"tanuki-racing\"\n        region: \"\u003CREGION>\"\n        image: $AR_IMAGE\n```\n\n\n1. Go back to the main GitLab project and view the pipeline that was just\ninitiated. Take note of the stages that should be the same stages that were\ndefined in Step 2.  \n\n2. Once the pipeline is complete, go to the Google Cloud console and then\n**Cloud Run** via search or navigation. A new Cloud Run service called\n`tanuki-racing` should be created.  \n\n3. Click the **service name** and then go to the **Security** tab. Ensure\nthat the service is set to **Allow unauthenticated invocations**. This will\nmake the deployed app publicly available. 
The app URL posted on screen is\nnow available and should open a new browser tab when clicked.\n\n\nBy utilizing GitLab’s CI/CD pipelines to build and push a containerized\napplication to Google Artifact Registry, you can see the power of GitLab’s\nAI-powered DevSecOps Platform as a means to building secure applications.\nGitLab also deployed the containerized application to Google’s Cloud Run as\na low-cost running application on the public internet. Using GitLab to\ninstrument building an application, pushing a container and triggering a\ncloud run deployment allows DevOps engineers to have the assurance that\nsecure applications are being run on the public-facing internet.\n\n\n> [Sign up for a free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/devsecops/) to begin working\nwith these integrations. Also, check out our [solutions architecture\narea](https://about.gitlab.com/blog/tags/solutions-architecture/) for more\nGitlab and Google Cloud tutorials.\n","product",[232,9,810,723,749],{"slug":1110,"featured":6,"template":700},"google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab","content:en-us:blog:google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab.yml","Google Cloud Integrations For Secure Cloud Run Deployments At Gitlab","en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab.yml","en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab",{"_path":1116,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1117,"content":1123,"config":1129,"_id":1131,"_type":13,"title":1132,"_source":15,"_file":1133,"_stem":1134,"_extension":18},"/en-us/blog/google-cloud-next-anthos-kubernetes",{"title":1118,"description":1119,"ogTitle":1118,"ogDescription":1119,"noIndex":6,"ogImage":1120,"ogUrl":1121,"ogSiteName":686,"ogType":687,"canonicalUrls":1121,"schema":1122},"Google Cloud Next: Doubling down on Kubernetes and multi-cloud","Everything you need to know from last week’s big 
event.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668514/Blog/Hero%20Images/multi-cloud-future.jpg","https://about.gitlab.com/blog/google-cloud-next-anthos-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Cloud Next: Doubling down on Kubernetes and multi-cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Melissa Smolensky\"}],\n        \"datePublished\": \"2019-04-16\",\n      }",{"title":1118,"description":1119,"authors":1124,"heroImage":1120,"date":1126,"body":1127,"category":300,"tags":1128},[1125],"Melissa Smolensky","2019-04-16","\nLast week at Google Next we saw Google bet big on Kubernetes. Google announced Anthos,\na multi-cloud platform based on Kubernetes, as well as Cloud Run, Google Cloud’s commercial Knative offering.\nThe key technology at the center of these two big announcements is Kubernetes.\nAs [Janakiram MSV](https://twitter.com/janakiramm) stated in a [Forbes article](https://www.forbes.com/sites/janakirammsv/2019/04/14/everything-you-want-to-know-about-anthos-googles-hybrid-and-multi-cloud-platform/#68ffc6d05b66) in regards to Anthos,\n\n> The core theme of Anthos is application modernization. Google envisages a future where all enterprise applications will run on Kubernetes.\n\nAnd in his [New Stack article](https://thenewstack.io/how-google-cloud-run-combines-serverless-with-containers/) about Cloud Run,\n\n> Like the way it offered a managed Kubernetes service before any other provider, Google moved fast in exposing Knative through Cloud Run to developers.\n\nFor a quick overview of the news at Google Next, [Brandon Jung](https://twitter.com/brandoncjung),\nVP of Alliances at GitLab, gives a quick recap of the news and how it impacts GitLab. 
Take a look.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/teRaXAPbfoA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nLaunched by Google in 2014 at the first DockerCon, Kubernetes has become the de facto standard\nfor container orchestration. This May, 12,000 people will gather at KubeCon Barcelona to\nlearn how to implement and use Kubernetes to drive forward cloud native application development within their organizations.\n\nHere at GitLab we embraced Kubernetes early on as well, and we are continuing to take our\ndedication further, putting the power of Kubernetes in the developer workflow.\nEven the CNCF uses GitLab to provide cross-project\ncontinuous integration and interoperability testing.\n\n## Kubernetes throughout every step of the software development lifecycle\n\n“By allowing people to quickly connect Kubernetes clusters to their projects we are helping many\nenterprises embrace the cloud native way of building applications,” says Sid Sijbrandij, CEO at GitLab.\n“By providing a single application we allow enterprise developer and operations teams to embrace\nKubernetes every step of the way in their software development process.\nWe’ve seen a large financial institution go from a single build every two weeks to over 1,000\nself-served builds a day using GitLab. It is wonderful to see the scale we can unlock for organizations\nby providing access to Kubernetes in the developer workflow.”\n\n## GitLab plus Kubernetes\n\nIf you are looking to get started using [Kubernetes with GitLab](/solutions/kubernetes/),\nyou can easily connect any existing Kubernetes cluster on any platform to GitLab by using\nGitLab’s native Kubernetes integration. 
GitLab even makes it easy to set up and configure new\nclusters with just a few clicks using the Google Kubernetes Engine (GKE) integration.\nOnce connected, teams can install managed applications like Helm Tiller, Ingress,\nand Prometheus to their cluster with a single click in the GitLab interface.\nConnected clusters are available as a deploy target from GitLab CI/CD and are monitored\nusing GitLab’s bundled Prometheus capabilities.\n\nWe love seeing the community embrace GitLab and Kubernetes.\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">getting back to grips with \u003Ca href=\"https://twitter.com/hashtag/GitLab?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLab\u003C/a> CICD with \u003Ca href=\"https://twitter.com/hashtag/Terraform?src=hash&amp;ref_src=twsrc%5Etfw\">#Terraform\u003C/a> jobs and knocked up a \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a> cluster for the runner! \u003Ca href=\"https://twitter.com/hashtag/devops?src=hash&amp;ref_src=twsrc%5Etfw\">#devops\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/devoops?src=hash&amp;ref_src=twsrc%5Etfw\">#devoops\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/nomorejenkins?src=hash&amp;ref_src=twsrc%5Etfw\">#nomorejenkins\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/SRE?src=hash&amp;ref_src=twsrc%5Etfw\">#SRE\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/GCP?src=hash&amp;ref_src=twsrc%5Etfw\">#GCP\u003C/a>\u003C/p>&mdash; Ferris Hall (@Ferrish07) \u003Ca href=\"https://twitter.com/Ferrish07/status/1106252265218703360?ref_src=twsrc%5Etfw\">March 14, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">I&#39;ve just posted a little experience report. 
I&#39;m now using \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a>  to spread my build load, thanks to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> and \u003Ca href=\"https://twitter.com/GCPcloud?ref_src=twsrc%5Etfw\">@GCPcloud\u003C/a>. \u003Ca href=\"https://t.co/KGQ9kyEEP5\">https://t.co/KGQ9kyEEP5\u003C/a>\u003C/p>&mdash; Paul Hicks (@tenwit) \u003Ca href=\"https://twitter.com/tenwit/status/1104828372197113856?ref_src=twsrc%5Etfw\">March 10, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"pl\" dir=\"ltr\">GitLab CI/CD &amp;&amp; Kubernetes by Bruno Fonseca \u003Ca href=\"https://t.co/ZDymOsbKfc\">https://t.co/ZDymOsbKfc\u003C/a>\u003C/p>&mdash; Paulo George Bezerra (@paulobezerr) \u003Ca href=\"https://twitter.com/paulobezerr/status/1108049894877659136?ref_src=twsrc%5Etfw\">March 19, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nCover image by [Cody Schroeder](https://unsplash.com/@codyrs) on [Unsplash](https://unsplash.com/photos/L99UKlcUBJY?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[789,278,810,9,811],{"slug":1130,"featured":6,"template":700},"google-cloud-next-anthos-kubernetes","content:en-us:blog:google-cloud-next-anthos-kubernetes.yml","Google Cloud Next Anthos 
Kubernetes","en-us/blog/google-cloud-next-anthos-kubernetes.yml","en-us/blog/google-cloud-next-anthos-kubernetes",{"_path":1136,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1137,"content":1143,"config":1150,"_id":1152,"_type":13,"title":1153,"_source":15,"_file":1154,"_stem":1155,"_extension":18},"/en-us/blog/google-gitlab-serverless-webinar",{"title":1138,"description":1139,"ogTitle":1138,"ogDescription":1139,"noIndex":6,"ogImage":1140,"ogUrl":1141,"ogSiteName":686,"ogType":687,"canonicalUrls":1141,"schema":1142},"Container apps on serverless: Write once, deploy anywhere","Containers, serverless, and microservices, oh my! Cut to the chase and learn how to write apps once and deploy anywhere with emerging technologies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666851/Blog/Hero%20Images/gitlab-serverless-blog.png","https://about.gitlab.com/blog/google-gitlab-serverless-webinar","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write once, deploy anywhere: Containerized applications on modern serverless platforms\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tina Sturgis\"}],\n        \"datePublished\": \"2019-06-13\",\n      }",{"title":1144,"description":1139,"authors":1145,"heroImage":1140,"date":1147,"body":1148,"category":300,"tags":1149},"Write once, deploy anywhere: Containerized applications on modern serverless platforms",[1146],"Tina Sturgis","2019-06-13","\n\nUsing containers has become standard practice in app development today. We all get the value of why you want to build with containers. But as a developer, why should you care about [serverless](/topics/serverless/)? It’s simple, you can eliminate worry about the infrastructure that your app is going to run on and focus on the impact of the app itself. 
Specifically the business logic of how the app will interact with things like the end users and/or operating systems.\n\nThe concepts of serverless quickly move the conversation towards one around a microservices architecture. As we move away from building applications in a monolith, moving towards serverless and eliminating the need to worry about that infrastructure begin to make a lot more sense.\n\nSo now, how do we take these concepts that we hear and/or read about that increase velocity, flexibility, and scalability, and put them into action for your own application development?\n\nFind out at our webinar, \"Running containerized applications on modern serverless platforms\" on Jun. 25, 2019 with GitLab and Google experts. We'll take a deep dive into how new and emerging technologies like Kubernetes, Knative, Cloud Run, and GitLab Serverless can provide great stability and scalability while lowering costs and increasing the pace of innovation.\n\n[Reserve your spot.](https://webinars.devops.com/running-containerized-applications-on-modern-serverless-platforms)\n{: .alert .alert-gitlab-purple .text-center}\n",[9,854,232,108,811],{"slug":1151,"featured":6,"template":700},"google-gitlab-serverless-webinar","content:en-us:blog:google-gitlab-serverless-webinar.yml","Google Gitlab Serverless Webinar","en-us/blog/google-gitlab-serverless-webinar.yml","en-us/blog/google-gitlab-serverless-webinar",{"_path":1157,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1158,"content":1164,"config":1170,"_id":1172,"_type":13,"title":1159,"_source":15,"_file":1173,"_stem":1174,"_extension":18},"/en-us/blog/google-next-2018-recap",{"title":1159,"description":1160,"ogTitle":1159,"ogDescription":1160,"noIndex":6,"ogImage":1161,"ogUrl":1162,"ogSiteName":686,"ogType":687,"canonicalUrls":1162,"schema":1163},"Google Next 2018 Recap","Several GitLab team-members participated in Google Next in San Francisco. 
Here’s a recap of what went on.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679821/Blog/Hero%20Images/melody-meckfessel-gitlab-google-next-keynote.png","https://about.gitlab.com/blog/google-next-2018-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Next 2018 Recap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-07-27\",\n      }",{"title":1159,"description":1160,"authors":1165,"heroImage":1161,"date":1167,"body":1168,"category":300,"tags":1169},[1166],"William Chia","2018-07-27","\n\n## Google Partner Award Winner for Innovative Solution in Developer Ecosystem\n\nGoogle's Partner Summit kicked off a day before the broader Next conference started. At the summit, we were honored to receive the Google Cloud Partner Award for Innovative Solution in Developer Ecosystem for the [tight integration with GKE](/partners/technology-partners/google-cloud-platform/) we released earlier this year. 
Of course, we decided to take some fun photos with the cloud logo.\n\n![Sid Sijbrandij and Google execs](https://about.gitlab.com/images/blogimages/google-next-2018/sid-sijbrandij-google-execs.jpg){: .shadow.large.center}\n\n![Sid Sijbrandij and Google tech partner team](https://about.gitlab.com/images/blogimages/google-next-2018/sid-sijbrandij-google-tech-partner-team.jpg){: .shadow.large.center}\n\n![Eliran Mesika with GitLab's award + GitLab team with award](https://about.gitlab.com/images/blogimages/google-next-2018/eliran-mesika-gitlab-google-award-team.jpg){: .large.center}\n\n## Launch partner for GCP Marketplace with Kubernetes Apps\n\n![GCP Marketplace launch partners at Google Next](https://about.gitlab.com/images/blogimages/google-next-2018/gcp-marketplace-launch-partners-google-next.jpg){: .shadow.medium.center}\n\nWhile the GCP Marketplace announcement went out a few days before the show, there was still [a lot of buzz about it at Google Next](https://www.youtube.com/watch?v=C6koWw0r07Y&amp=&t=28m29s). In addition to traditional apps, which deploy VMs on Compute Engine, the new GCP Marketplace now supports Kubernetes apps, which deploy to a Kubernetes cluster running on Google Kubernetes Engine. We were happy to be a launch partner, offering the ability to [install GitLab via the GCP Marketplace](/blog/install-gitlab-one-click-gcp-marketplace/) on day one.\n\n## Serverless, Knative, and Istio\n\n[Knative](https://cloud.google.com/knative/) and [Istio](https://istio.io/) are two new projects announced during the show that we're excited about. Knative enables \"serverless\" workloads on Kubernetes while Istio is a service mesh for microservices. 
Check out [Josh](/company/team/#joshlambert) chatting live with [Sid](/company/team/#sytses) from the show (where Wi-Fi was a bit choppy) about serverless, Knative, and Istio, and how these technologies can potentially tie in with GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/k1jK4F4NoBw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Google Cloud Build + GitLab CI/CD\n\nOne of the key announcements from the show was the introduction of Google Cloud Build, a CI/CD tool for GCP. Many folks asked us if we saw this as competitive to GitLab CI/CD, and how that would affect our partnership with Google. First and foremost, GitLab supports a multi-cloud strategy. We partner with all of the major cloud vendors to ensure GitLab CI/CD can support multi-cloud deployments. Many cloud vendors have their own CI/CD tooling, like AWS Code Deploy or IBM Cloud Pipelines. For us, Cloud Build is just another point of collaboration. In fact, our own [Josh Lambert](/company/team/#joshlambert) teamed up with [Christopher Sanson](https://www.linkedin.com/in/christophersanson/) to create a GitLab + Google demo for Christopher's session, \"CI/CD for Hybrid and Multi-Cloud Customers.\"\n\n![Christopher Sanson demos GitLab CI/CD with Cloud Build](https://about.gitlab.com/images/blogimages/google-next-2018/christopher-sanson-gitlab-cicd.jpg){: .shadow.medium.center}\n\nFirst, Christopher showed how to use GitLab as your code repo with Cloud Build as your CI/CD connected up via webhooks to Cloud Functions. Here's a link to some [sample code for setting up a Cloud Function to trigger cloud build from GitLab](https://gitlab.com/joshlambert/cloud-function-trigger) if you'd like to try it out yourself.\n\nThen Christopher showed how to use GitLab CI/CD and GitLab container registry while offloading the infrastructure build to Google Cloud Build. 
Using Google Cloud Build together with GitLab CI/CD is one way to overcome some of the security problems of docker-in-docker (e.g. requires privileged containers). Check out the video below to see it in action. Additionally, here's an example ruby app with a [sample configuration for connecting Gitlab CI/CD to Cloud Build](https://gitlab.com/joshlambert/minimal-ruby-app/merge_requests/1/diffs).  \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/IUKCbq1WNWc?start=1324\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## GitLab.com is migrating to GCP\n\n![Melody Meckfessel talks GitLab GCP migration during keynote](https://about.gitlab.com/images/blogimages/google-next-2018/melody-meckfessel-gitlab-google-next-keynote.png){: .shadow.medium.center}\n\n>\"Our friends at GitLab have created a complete open source DevOps stack\" - [Melody Meckfessel](https://www.linkedin.com/in/melodymeckfessel/), Vice President of Engineering, Google Cloud Platform\n\nAs part of our plans to make GitLab.com a rock solid, enterprise-ready SaaS offering, we are migrating from Azure to Google Cloud Platform. We’ve been carefully planning this migration for many months and are now very close to executing with a target migration date of August 11. Melody Meckfessel talked a bit about our migration during her keynote on Thursday. Check out our previous blog post to read up on the [full details of GitLab’s GCP migration](/blog/gcp-move-update/).  
\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/JQPOPV_VH5w?start=1363\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Talking to you\n\n![William, Mike, and Reb in the GitLab booth](https://about.gitlab.com/images/blogimages/google-next-2018/william-chia-mike-walsh-gitlab-booth-duo.jpg){: .shadow.large.center}\n\nOf course one of our favorite parts of any trade show is getting to meet our users and customers face to face. We love hearing the palpable excitement when you talk about how GitLab is streamlining your toolchain or easing your move to Kubernetes. We love sharing the story with folks who don’t know yet and seeing their faces light up when we tell them GitLab’s not just a version control solution, but an end-to-end DevOps application with built-in project planning, CI/CD, container registry, monitoring, and more. Google Next ’18 was a great show, and we can’t wait to see you next time! 
Check out the [full list of events](/events) we’ll be at to find one close to you.\n",[278,9,789,810,811],{"slug":1171,"featured":6,"template":700},"google-next-2018-recap","content:en-us:blog:google-next-2018-recap.yml","en-us/blog/google-next-2018-recap.yml","en-us/blog/google-next-2018-recap",{"_path":1176,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1177,"content":1183,"config":1189,"_id":1191,"_type":13,"title":1192,"_source":15,"_file":1193,"_stem":1194,"_extension":18},"/en-us/blog/google-next-2018-security-track-recap",{"title":1178,"description":1179,"ogTitle":1178,"ogDescription":1179,"noIndex":6,"ogImage":1180,"ogUrl":1181,"ogSiteName":686,"ogType":687,"canonicalUrls":1181,"schema":1182},"Google Next 2018 security track recap","Here's how one GitLab team-member made the most of the security track at Google Next 2018.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678940/Blog/Hero%20Images/securitygooglenext.jpg","https://about.gitlab.com/blog/google-next-2018-security-track-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Next 2018 security track recap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jim Thavisouk\"}],\n        \"datePublished\": \"2018-08-10\",\n      }",{"title":1178,"description":1179,"authors":1184,"heroImage":1180,"date":1186,"body":1187,"category":300,"tags":1188},[1185],"Jim Thavisouk","2018-08-10","\nEvery time someone asks me how I like working at GitLab, I say, \"I love it here!\"\nWith our [company culture](https://handbook.gitlab.com/handbook/values/), 100 percent [remote workforce](/company/culture/all-remote/), and [growing team](/jobs/), it's a pleasure\nto work with such a high energy team.\nThe [security department](https://handbook.gitlab.com/handbook/security/#security-department)\nis continually growing -- very fast! 
We each have our own specialties and bring a diverse selection\nof strong experiences, while working very well together. In my position, I have\nbeen focusing very heavily on policy as code to raise the bar in security here at GitLab. This blog post was inspired by [William Chia](/company/team/#thewilliamchia)'s\n[Google Next 2018 recap](/blog/google-next-2018-recap/). If you haven't read it, I highly recommend it!\n\n## Security highlights of Google Next 2018\n\n### Forseti\n\nI was excited coming into this conference for [Forseti](https://forsetisecurity.org/),\nespecially with the announcement of\n[Forseti 2.0](https://forsetisecurity.org/news/2018/06/11/forseti-2.0-launch.html).\nWe had a [Forseti Hack Day](https://groups.google.com/a/forsetisecurity.org/forum/#!topic/announce/bHy8QCK_AY0)\nthat kicked off a day before the actual conference, which allowed me to interact\nwith Google engineers, product managers, and Forseti customers. For\nanyone who missed Forseti's session from [Chris Law](https://www.linkedin.com/in/chrislaw/),\n[Michael Capicotto](https://www.linkedin.com/in/mcapicotto/), and\n[Marten Van Wezel](https://www.linkedin.com/in/martenvanwezel/), you can check it out\n[the recording](https://www.youtube.com/watch?v=4TrlgbV_VlQ). See [the details for joining the discussion here](https://groups.google.com/a/forsetisecurity.org/forum/#!topic/announce/8OSAB7UEzSY).\n\n### Istio\n\n[\"Istio is platform-independent and designed to run in a variety of environments,\nincluding those spanning Cloud, on-premise, Kubernetes, Mesos, and more.\"](https://istio.io/docs/concepts/what-is-istio/)\nI'm excited to see Istio 1.0, which was just released a few days ago! See [the team's talk](https://youtu.be/eOI2aM9P7-c)\nfrom [Tao Li](https://www.linkedin.com/in/tao-li-1a447935/) and\n[Samrat Ray](https://www.linkedin.com/in/samratray/).\n\n### Best practices\n\nEveryone can use best practices. 
At Forseti Hack Day, I met [Tom Salmon](https://www.linkedin.com/in/tomcsalmon/)\nwho has vast experience in security. In his [talk](https://www.youtube.com/watch?v=ZQHoC0cR6Qw),\nhe provides a great knowledge base and reference point to best security practices in GCP.\n\n### Sessions are now live\n\nThese were only a few sessions at Google Next, and there are hundreds of others\nto check out. You can find them neatly categorized on\n[YouTube](https://www.youtube.com/channel/UCTMRxtyHoE3LPcrl-kT4AQQ/playlists?flow=grid&view=50&shelf_id=8).\n\n## We'd love to hear your feedback\n\nWe'd love to hear from you on how you use any of these products in your environment.\nOur team is currently working very closely with the Forseti team, and I'm sure they\nwould love to have you join in on the discussion as well. Don't hesitate to\nreach out directly to me by email (jthavisouk@gitlab.com) or join any of these groups to keep a dialogue going\nabout any of these products. We can only help each other in the process.\n",[278,9,789,810,811,896],{"slug":1190,"featured":6,"template":700},"google-next-2018-security-track-recap","content:en-us:blog:google-next-2018-security-track-recap.yml","Google Next 2018 Security Track Recap","en-us/blog/google-next-2018-security-track-recap.yml","en-us/blog/google-next-2018-security-track-recap",{"_path":1196,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1197,"content":1203,"config":1212,"_id":1214,"_type":13,"title":1215,"_source":15,"_file":1216,"_stem":1217,"_extension":18},"/en-us/blog/gsoc-at-gitlab",{"title":1198,"description":1199,"ogTitle":1198,"ogDescription":1199,"noIndex":6,"ogImage":1200,"ogUrl":1201,"ogSiteName":686,"ogType":687,"canonicalUrls":1201,"schema":1202},"Google Summer of Code at GitLab – some intern highlights","GitLab team members mentored student interns and helped them develop open source projects during Google Summer of 
Code.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682129/Blog/Hero%20Images/gsoc_cover.jpg","https://about.gitlab.com/blog/gsoc-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Summer of Code at GitLab – some intern highlights\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aakriti Gupta\"}],\n        \"datePublished\": \"2021-09-01\",\n      }",{"title":1198,"description":1199,"authors":1204,"heroImage":1200,"date":1206,"body":1207,"category":300,"tags":1208},[1205],"Aakriti Gupta","2021-09-01","\n\nGitLab participated in [Google Summer of Code](https://summerofcode.withgoogle.com/) for the first time this year. We hosted four student interns to work with us on four different projects under the supervision of two or three mentors each.\n\nFor the past 16 years, Google has hosted the Summer of Code to introduce students to the world of open source. Over the summer, student interns work on a project with an open source organization and are closely mentored by the developers of the open source project. More than [200 organizations](https://summerofcode.withgoogle.com/organizations/) participated this year.\n\nWe started off the summer with a two-week long community bonding period to get our students familiar with how we work at GitLab and helped them set-up their local development environments. During the 10-week program we worked through scoped projects with regular check-ins and [a final demo](https://youtu.be/--Neg5pwwnI) to conclude the program.\n\n## Meet the students\n\n### Alejandro Rusi\n[Alejandro](https://gitlab.com/rusi-ruse), a CS student from Argentina, worked on [enabling Courseware as Code](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues/4) through his project. 
Check out his [video presentation here](https://youtu.be/qgQQ4MgnKR4) and read [more about the project here](https://alejandro-rusi.gitlab.io/2021/05/31/toward-courseware). Alejandro said:  \n\n> They quickly made me feel welcome and part of Gitlab. All of the topics to choose from were very interesting, and all mentors seemed great.\n>\n> I would like to highlight a moment during GSoC where I wasn't able to do my normal workload due to a personal problem, and my mentors where incredibly supportive and understanding.\n\n### Anshuman Singh\n\n[Anshuman](https://gitlab.com/singhanshuman), a CS student who joined us from India, collaborated with the Static Analysis team to work on [writing vulnerability detection rules  for SAST](https://gitlab.com/groups/gitlab-org/-/epics/6089). Anshuman said: \n\n> For a beginner, it is normal to feel insecure about achieving specified tasks in your group.\n>\n> I am glad that my mentors Julian and Ross were there at every step of the program to provide support and clear my doubts about anything. It was such an enriching experience for me. I am glad to be the part of GitLab for this Google Summer of Code edition. :)\n\n### Cyrine Gamoudi\n\nA computer engineering student from Tunisia, [Cyrine](https://gitlab.com/CyrineG1) worked with the Static Analysis team on [porting SAST and Secret Detection rails platform code to GitLab CE](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues/6).\n\n> The project went very smoothly. I was able to achieve almost all of the planned milestones and I'm currently still in contact with my mentors, working on what was left. I enjoyed getting an inside look into how open source projects are maintained as well as how they evolve through time. 
It was also interesting to see the impact of historical architectural decisions on what could and could not be done later on.\n\n### Shubham Kumar\n\nNow in his final year of schooling, [Shubham](https://gitlab.com/imskr) from India helped the Geo team [improve our backup and restore features](https://shubhamkumar.live/blog/Improving-Backup-and-Restore-For-GitLab-GSoC-2021/).\n\n> Mentorship was amazing. Mentors helped me a lot whenever I had problem. Contributing to GitLab is very welcoming. I absolutely loved it.\n\n## GitLab mentors share their thoughts \n\n### What went well?\n\n- External organization\n  - The folks at Google were well organized, the entire schedule was available right at the beginning and the reminder emails were very informative and well timed.\n  - We used it to create our own calendar and that was very helpful.\n- Asynchronous working style\n\n> Having recorded meetings and an agenda doc was really helpful, especially for cases where one mentor went on holidays it was easy to catch up on things. Writing up a planning epic with our student Anshuman was really helpful to make sure that we were on the same page and to clearly define the project deliverables. - [Julian Thome](/company/team/#julianthome), senior vulnerability research engineer at GitLab. \n>\n> Related to this, GitLab's default mode of working that favors asynchronous communication and the written form feels very well-aligned with GSoC and working across time zones. Even without a large amount of overlap between working for myself and our mentee, it felt very effective and like we had a strong foundation in place to support communication and workflows (just point to our existing handbook and docs). - [Lucas Charles](/company/team/#theoretick), staff backend engineer, Secure, at GitLab.\n\n  - It was really useful to have two mentors on the project. 
This way it was easier sharing responsibilities and managing other priorities, especially when one mentor was out.\n\n### What could be improved? \n\n- We had considerable engagement on the project proposal issues but not as many applications.\n- GitLab is huge and a complex object model for students to hold onto.\n- Running GitLab locally requires a lot of resources.\n- The fork contribution model wasn't efficient for some projects.\n\n\n- Define the required skills for the project better\n> Since GSoC is 10 short weeks, making sure that the student has acquired all the required skills for the project before it starts would have allowed us to reduce the overall mentoring workload and to use mentoring time more efficiently by focusing on the project objectives. Next year, we can make better use of the \"Community Bonding\" period by giving the students more guidance and some time upfront to learn the required technologies/languages so that they are fully prepared before the coding phase begins. -  Julian\n\n- A clear \"victory task,\" possibly in the frontend, would have made some of the projects more \"visible\" and would have felt more complete.\n\n## Wrapping up\n\n[Tetiana Chupryna](/company/team/#brytannia), senior backend engineer, Secure, at GitLab sums up the experience of the mentors really well: \n\n> This program gave me a feeling of deep fulfilment as I was able to look at GitLab through the eyes of a community contributor and I hope that this project was useful for our student in her career, and she will return to GitLab one day as a contributor (we were lucky to have her on this project). 
So it was a summer well spent 🍎.\n\nWe hope GitLab can be back at Google Summer of Code next year!\n\n[Cover image](https://unsplash.com/photos/7RQf2X6aXXI) by [Raphaël Biscaldi](https://unsplash.com/@les_photos_de_raph)\n{: .note}\n\n",[268,1209,9,1210,1211],"contributors","open source","remote work",{"slug":1213,"featured":6,"template":700},"gsoc-at-gitlab","content:en-us:blog:gsoc-at-gitlab.yml","Gsoc At Gitlab","en-us/blog/gsoc-at-gitlab.yml","en-us/blog/gsoc-at-gitlab",{"_path":1219,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1220,"content":1226,"config":1232,"_id":1234,"_type":13,"title":1235,"_source":15,"_file":1236,"_stem":1237,"_extension":18},"/en-us/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud",{"title":1221,"description":1222,"ogTitle":1221,"ogDescription":1222,"noIndex":6,"ogImage":1223,"ogUrl":1224,"ogSiteName":686,"ogType":687,"canonicalUrls":1224,"schema":1225},"Auto Deploy a GitLab.com project to Google Cloud","How to get started with our auto deploy feature using Google Kubernetes Engine.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670037/Blog/Hero%20Images/auto-deploy-google-cloud.jpg","https://about.gitlab.com/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Auto Deploy a GitLab.com project to Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dmitriy Zaporozhets\"}],\n        \"datePublished\": \"2017-08-10\",\n      }",{"title":1221,"description":1222,"authors":1227,"heroImage":1223,"date":1229,"body":1230,"category":695,"tags":1231},[1228],"Dmitriy Zaporozhets","2017-08-10","\n\nFor up-to-date information on GitLab's native integration with Google Kubernetes Engine, please visit the announcement blog post: [GitLab + Google Cloud Platform = simplified, scalable deployment](/blog/gke-gitlab-integration/).\n{: .alert 
.alert-gitlab-orange}\n\nOnce you write your code the next question is, \"How do you deploy it?\" There are plenty of ways to do it but none of them is perfect. You need to configure external tools, write your own scripts and maybe even do manual command execution every time you want a new version deployed. At GitLab we believe deployment should be an essential part of workflows, like code review and CI. Several months ago we shipped an amazing feature, [auto deploy](https://docs.gitlab.com/ee/topics/autodevops/stages.html), that should take care of code deployment for you. Finally, I found some time to give it a try.\n\n\u003C!-- more -->\n\n## What is GitLab Auto Deploy?\n\nOriginally released in [8.15](/releases/2016/12/22/gitlab-8-15-released/#auto-deploy) and heavily improved since then, auto deploy should deploy your application as part of a CI/CD pipeline within the GitLab user interface. It means you can set up an application to be deployed automatically every time a new commit lands into the `master` branch.\n\nAs per the documentation, the feature will package your application into a Docker image that then will be deployed to Kubernetes. GitLab has a container registry feature so the Docker image will be stored within GitLab too.\n\nSetup of the GitLab auto deploy feature is a matter of clicking on the \"Set up auto deploy\" button and applying the \"Kubernetes\" template to your `.gitlab-ci.yml` file. However, before this, you need to configure your GitLab project with valid credentials so it can access your cluster. 
Which brings us to the point that you need your Kubernetes cluster first.\n\n## Why Google Kubernetes Engine?\n\nThe documentation states, \"Google Kubernetes Engine is a managed environment for deploying containerized applications.\" It runs Kubernetes and you can create your cluster with a few clicks in the web interface.\n\nThere are [a few other](https://kubernetes.io/docs/setup/pick-right-solution/) Kubernetes hosting solutions available on the market and you might prefer a different one, but as a newcomer I decided to go with Google for a few reasons:\n\n* It's number one on the list of proposed solutions on Kubernetes doc.\n* It gives a nice free tier (300$ at the time of this blog post) which is enough for experiments.\n* Google originally started Kubernetes, so I expected some level of maturity from the service.  \n\n## How to tie it all together\n\nIt took me some time to figure out the sequence of events and actions to make it work. This is the result:\n\n1. Create GitLab.com project with Dockerfile\n2. Create cluster\n3. Copy credentials to GitLab.com project\n4. Apply auto deploy template to `.gitlab-ci.yml`\n\nAs a result, I have a [Ruby application](https://gitlab.com/dzaporozhets/minimal-ruby-app) that is built and deployed to staging automatically once I push code to the master branch. Additionally, I can manually deploy any pipeline to production with a single click.  
\n\nFor those who are new to Kubernetes but want to try GitLab auto deploy in action, I made a [quick start guide](https://docs.gitlab.com/ee/topics/autodevops/stages.html).\n\n[Cover image](https://unsplash.com/@jbcreate_?photo=eUMEWE-7Ewg) by [Joseph Barrientos](https://unsplash.com/@jbcreate_) on Unsplash\n{: .note}\n",[9,810],{"slug":1233,"featured":6,"template":700},"how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud","content:en-us:blog:how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud.yml","How To Auto Deploy A Gitlab Dot Com Project To Google Cloud","en-us/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud.yml","en-us/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud",{"_path":1239,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1240,"content":1246,"config":1252,"_id":1254,"_type":13,"title":1255,"_source":15,"_file":1256,"_stem":1257,"_extension":18},"/en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"title":1241,"description":1242,"ogTitle":1241,"ogDescription":1242,"noIndex":6,"ogImage":1243,"ogUrl":1244,"ogSiteName":686,"ogType":687,"canonicalUrls":1244,"schema":1245},"How to deploy a PHP app using GitLab's Cloud Run integration","Are you using PHP and want an easy way to deploy your application to Google Cloud? 
Follow this guide to deploy your app with Google Cloud Run in under 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098264/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_519147119_2RafH61mqosMZv8HGAlsUj_1750098264407.jpg","https://about.gitlab.com/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy a PHP app using GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-12-10\",\n      }",{"title":1241,"description":1242,"authors":1247,"heroImage":1243,"date":1249,"body":1250,"category":695,"tags":1251},[1248,745],"Christian Nnachi","2024-12-10","Writing PHP application code and ensuring the application is running\nsmoothly in production are often two different skills sets owned by two\ndifferent engineers. GitLab aims to bridge the gap by enabling the engineer\nwho has written the PHP application code to also deploy it into Google Cloud\nPlatform with little effort. \n\n\nWhether you own event-driven, long-running services or deploy containerized\njobs to process data, Google Cloud Run automatically scales your containers\nup and down from zero — this means you only pay when your code is running.\n\n\nIf you are a PHP developer who would like to deploy your application with\nminimal effort to Google Cloud Platform, this guide will show you how using\nthe GitLab Google Cloud Run integration. 
\n\n\n# Overview\n\n\n- Create a new project in GitLab\n\n- Set up your PHP application\n\n- Utilizing the Google Cloud integration, create a Service account\n\n- Utilizing the Google Cloud integration, configure Cloud Run via merge\nrequest\n\n- Try adding another endpoint\n\n- Clean up\n\n\n## Prerequisites\n\n- Owner access on a Google Cloud Platform project\n\n- Working knowledge of\n[PHP](https://www.php.net/manual/en/introduction.php), an open-source,\ngeneral-purpose scripting language\n\n- Working knowledge of [GitLab\nCI](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci)\n\n- 10 minutes\n\n\n## 1. Create a new project in GitLab.\n\n\nWe decided to call our project `PHP cloud-run` for simplicity.\n\n\n![PHP cloud- run\nproject](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098287/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098287615.png)\n\n\nThen, create an index.php\napp[https://gitlab.com/demos/templates/php-cloud-run/-/blob/main/index.php](https://gitlab.com/demos/templates/php-cloud-run/-/blob/main/index.php).\n\n\n```php\n\n\u003C?php\n\n\n$name = getenv('NAME', true) ?: 'World';\n\necho sprintf('Hello %s!', $name);\n\n```\n\n\n## 2. Utilizing the Google Cloud integration, create a Service account.\n\n\nNavigate to **Operate > Google Cloud > Create Service account**. \n\n\n![Create Service account\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098287616.png)\n\n\nThen configure the region you would like the Cloud Run instance deployed to.\n\n\n![Configure region\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098287618.png)\n\n\n## 3. 
Utilizing the Google Cloud integration, configure **Cloud Run via\nmerge request**.\n\n\n![Deployment configuration\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098287620.png)\n\n\nThis will open a merge request. Immediately merge this merge request.\n\n\n![Enable Deployments to Cloud run\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098287622.png)\n\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`,  `GCP_SERVICE_ACCOUNT`, and\n`GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the\nprevious steps.\n\n\n![Variables\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098287624.png)\n\n\nCheck your pipeline and you will see you have successfully deployed to\nGoogle Cloud Run utilizing GitLab CI.\n\n\n![merge branch\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098287625.png)\n\n\n\u003Cbr>\u003C/br>\n\n\n![Google Cloud Run deployed with GitLab\nCI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098287627.png)\n\n\n## 4. 
Click the **Service URL** to view your newly deployed Flask server.\n\n\nIn addition, you can navigate to **Operate > Environments** to see a list of\ndeployments for your environments.\n\n\n![Environments\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098287628.png)\n\n\nBy clicking on the environment called **main**, you’ll be able to view a\ncomplete list of deployments specific to that environment.\n\n\n![Main\nenvironment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098287631.png)\n\n\n## 5. Add another endpoint\n\n\nTo get started with developing your PHP application, try adding another\nendpoint. For example, in your main file, you can add a `/bye` endpoint like\nthis:\n\n\n```\n\n\n\u003C?php\n\n\n$name = getenv('NAME', true) ?: 'World';\n\n\nif ($_SERVER['REQUEST_URI'] == '/bye') {\n    echo sprintf('Goodbye %s!', $name);\n} else {\n    echo sprintf('Hello %s!', $name);\n}\n\n\n```\n\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy\nthe updates. Once the job is complete, go back to the Service URL and\nnavigate to the `/bye` endpoint to see the new functionality in action.\n\n\n### Clean up\n\n\nTo prevent incurring charges on your Google Cloud account for the resources\nused in this tutorial, you can either delete the specific resources or\ndelete the entire Google Cloud project. 
For detailed instructions, refer to\nthe [cleanup guide\nhere](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n\n> Check out more [easy-to-follow tutorials from our Solutions Architecture\nteam](https://about.gitlab.com/blog/tags/solutions-architecture/).\n",[749,723,9,232],{"slug":1253,"featured":6,"template":700},"how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","content:en-us:blog:how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","How To Deploy A Php App Using Gitlabs Cloud Run Integration","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"_path":1259,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1260,"content":1265,"config":1272,"_id":1274,"_type":13,"title":1275,"_source":15,"_file":1276,"_stem":1277,"_extension":18},"/en-us/blog/how-to-make-your-devops-team-elite-performers",{"title":1261,"description":1262,"ogTitle":1261,"ogDescription":1262,"noIndex":6,"ogImage":844,"ogUrl":1263,"ogSiteName":686,"ogType":687,"canonicalUrls":1263,"schema":1264},"How to make your DevOps team elite performers","Every company wants DevOps done better. The DORA Report spotlights what it takes to be a DevOps elite, and what teams need to do to get there.","https://about.gitlab.com/blog/how-to-make-your-devops-team-elite-performers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make your DevOps team elite performers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-10-26\",\n      }",{"title":1261,"description":1262,"authors":1266,"heroImage":844,"date":1268,"body":1269,"category":852,"tags":1270},[1267],"Sharon Gaudin","2021-10-26","\n\nSo your company has a DevOps team –  great! 
– but are they elite performers or low performers?\n\nThere’s a chasm of difference between the two, according to the [State of DevOps 2021 report](https://gitlab.com/gitlab-com/www-gitlab-com/uploads/069ee8e2ee6af463cf0aafcd89eda33e/state-of-devops-2021.pdf) from DORA, the DevOps Research and Assessment team at Google. It’s the tipping point in how resilient, efficient and reliable your team is, and that’s directly tied to your ability to help your business be more competitive. (To be transparent, GitLab was one of the many sponsors of the report, and we’ve incorporated some of the DORA metrics [within our DevOps Platform](https://gitlab-com.gitlab.io/cs-tools/gitlab-cs-tools/what-is-new-since/?tab=features&s[…]tegories=DevOps+Reports&textSearch=DevOps&minVersion=13_08) so you can compare your highest and lowest-performing teams and see how much of the DevOps lifecycle each one is embracing.)\n\nBragging rights aside, a personal -- and not insignificant -- benefit of being on an elite DevOps team is that your [company value](/blog/a-look-at-devops-salaries/), as well as your [salary](/blog/four-tips-to-increase-your-devops-salary/), would likely rise, as would your ability to be hired at a top-tier company. \n\nSo what does it mean to be an elite DevOps team and what does it take to get there? Let’s dive in:\n\n## The benefits of being an elite team\n\nAccording to the DORA report there are specific things elite teams are able to consistently do. Here’s a look at some big goals:\n\n### Deploy more frequently\n\nElite performers deploy code 973 times more frequently than low performers, the survey notes. That’s right -- 973 times more. Low performers say they require a change lead time greater than six months. In sharp contrast, elite teams only need an hour. 
We’ll do the math for you: Elite teams have a 6,570 times faster lead time from commit to deploy than low performers.\n\n### Recover quicker\n\nThere’s a similar broad gap between low performers and elite teams when it comes to stability. DORA notes the time it takes the elite group to restore service is less than one hour, compared to more than six months for the low performers. \n\n### Lower change failure rates\n\nWhen it comes to change failure rates, there’s a 3 times difference between top and bottom performers. That means the elite group’s changes are a third less likely to fail. \n\n## DORA’s tips on how to become an elite team\n\nThose are great goals but how do you make them a reality? These six tips will take you in the right direction\n\n### 1. Make smart use of hybrid and multi-cloud environments\n\nDORA survey respondents who use either hybrid cloud or [multi-cloud](/topics/multicloud/) environments were 1.6 times more likely to beat their company’s performance targets than those who did not use these cloud setups. Multi-cloud users, for instance, say they are able to leverage each cloud provider’s unique benefits and achieve greater availability.\n\n### 2. How you implement the cloud matters\n\nWhen it comes to being able to support business needs, how the cloud is adopted and implemented makes a big difference. There’s a lot of benefit to adhering to the National Institute of Standards and Technology’s (NIST) [five essentials of cloud computing](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.500-291r2.pdf): on-demand self-service, broad network access, resource pooling, rapid elasticity or expansion, and measured service. DORA noted elite performers were 3.5 times more likely to have met all essential NIST cloud characteristics.\n\n### 3. 
Let DevOps and SRE complement each other\n\nTop DevOps professionals understand they don’t have to choose between DevOps and [site reliability engineering (SRE)](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/). They work well together. “Elite performers are 2.1x as likely to report the use of SRE practices as their low-performing counterparts,” the DORA report notes. “But even teams operating at the highest levels have room for growth: Only 10% of elite respondents indicated that their teams have fully implemented every SRE practice we investigated.”\n\n### 4. Make sure you’re documenting\n\nThere’s a direct correlation between creating documents, which include everything from manuals to code comments, to a DevOps team’s success. Solid documentation is accurate, up-to-date, comprehensive, searchable, well organized and clear. The report points out that teams with good documentation are 2.4 times more likely to meet or exceed their reliability targets, and 2.5 times more likely to fully leverage the cloud.\n\n### 5. Build in security throughout development\n\nSecurity can get [a lot of lip service in DevOps](/blog/developer-security-divide/), but the best teams know that high delivery and operational performance are directly linked to integrating security practices throughout their development process. Security reviews must be integrated into every phase and applied to all major features, security professionals must be included in planning and development, and security testing must be automated.\n\n### 6. Pay attention to your team culture\n\nIn short, culture matters -- a lot. Industry surveys consistently show that culture is one of the top drivers of IT performance. 
Professionals who [have a sense of belonging and inclusion](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/), and who work collaboratively and cross-functionally, produce higher software delivery and operational performance. \n\nAccording to the DORA report, it’s clear that becoming an elite team of DevOps professionals is an attainable goal. The report notes a dramatic increase in the percentage of elite professionals this year: 26% (of 1,200 surveyed), up from just 7% in 2018. \n\nIt’s time to up your game or risk being left behind.\n\n_For a slightly different look at aspirational DevOps results, read our [2021 Global DevSecOps Survey](/developer-survey/)._\n\n",[854,1271,9],"careers",{"slug":1273,"featured":6,"template":700},"how-to-make-your-devops-team-elite-performers","content:en-us:blog:how-to-make-your-devops-team-elite-performers.yml","How To Make Your Devops Team Elite Performers","en-us/blog/how-to-make-your-devops-team-elite-performers.yml","en-us/blog/how-to-make-your-devops-team-elite-performers",{"_path":1279,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1280,"content":1286,"config":1293,"_id":1295,"_type":13,"title":1296,"_source":15,"_file":1297,"_stem":1298,"_extension":18},"/en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"title":1281,"description":1282,"ogTitle":1281,"ogDescription":1282,"noIndex":6,"ogImage":1283,"ogUrl":1284,"ogSiteName":686,"ogType":687,"canonicalUrls":1284,"schema":1285},"How we optimized infrastructure spend at GitLab","We keep our cloud spend under control with a spend optimization framework – now we're sharing it with you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681701/Blog/Hero%20Images/piggy_bank.jpg","https://about.gitlab.com/blog/how-we-optimized-our-infrastructure-spend-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we optimized 
infrastructure spend at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Davis Townsend\"}],\n        \"datePublished\": \"2020-10-27\",\n      }",{"title":1281,"description":1282,"authors":1287,"heroImage":1283,"date":1289,"body":1290,"category":695,"tags":1291},[1288],"Davis Townsend","2020-10-27","\n\nInfrastructure spend optimization is a hot topic these days as many established companies are migrating workloads to the cloud. Similarly,  fast-growing startups are struggling to control their operating costs as they expand their cloud footprint to meet user demand. \n\nAt GitLab we have taken a methodical and data-driven approach to the problem so we can reduce our cloud spend and control our operating costs, while still creating great features for our customers. We designed a five-stage framework which emphasizes building awareness of our infrastructure spend to the point where any change in costs is well understood and no longer a surprise.\n\nOur framework is very similar to a normal data maturity framework (shown below) that would progress through descriptive, predictive, and finally prescriptive analytics, but we tailor it specifically for this domain. I'll explain each stage and what it looks like at GitLab so you can see how you might apply it to your own organization.\n\n![Normal Data Maturity Framework](https://about.gitlab.com/images/blogimages/2020-10-28-How-We-Optimized-Infra-spend/DMM.jpeg \"Normal Data Maturity Framework\"){: .medium.center}\nA normal data maturity framework \n{: .note.text-center}\n\n## Spend optimization framework\n\n## 1. Basic cost visibility\n This stage can be thought of as data exploration. You just want to understand as much as you can about where you are spending money at a high level. What vendors and services are you spending the most money on? This data is generally provided by cloud vendors through a billing console, as well as through billing exports. 
I've found the way to get the best use out of both options is to use the provided billing console for answering simple questions about specific costs quickly, and the exports for integrating this data into your own analytics architecture for more granular reporting, [multicloud](/topics/multicloud/) reporting, or for specific recurring reports you need over a longer time horizon.\n \n### GitLab example\nWhen starting out, we looked at Google Cloud Platform (GCP) and their [Default Billing Export](https://cloud.google.com/billing/docs/how-to/export-data-bigquery) to get an overview of which products/projects/SKUs were responsible for the majority of our spend.\n\n## 2. Cost allocation\nThis stage is all about going from high-level areas of spend to more granular dimensions that tie back to relevant business metrics in your company. At GitLab we may want to look at what we spend on particular services like CI runners, or what we spend to support employees using GitLab.com as part of their job vs. customer spend. This data may not be readily available to you so there could be a lot of work involved to tie these sorts of relevant business dimensions back to the cost reports provided by your vendor.\n\n### GitLab example\nFor our production architecture we had some [GCP labels](https://cloud.google.com/compute/docs/labeling-resources) that indicated the internal service applied to the majority of our instances, so we started with those to see which services we spent most of our money on. More recently, we have created a [handbook page for Infrastructure Standards](/handbook/infrastructure-standards/) around project creation and label naming so that we can get even more insight out of our bill.\n\n\n\n## 3. 
Optimize usage efficiency\nOnce you can allocate costs to their relevant business metrics, then can you start to ask interesting questions such as, “Why is our storage spend so high on feature x?” By asking these questions and then talking with the subject matter experts about these potential areas of optimization you can start to come up with ideas to reduce some of this cost.\n\n### GitLab example\nWhen we reached this stage we began to identify many areas of opportunity, including:\n\n- [CI runners](https://gitlab.com/gitlab-org/gitlab/-/issues/35777): One of the areas discovered from stage 2 happened to be our CI runners, for which we created more granular reporting to see the cost by specific repos, pipelines, and jobs, which allowed us to find some ways to optimize our own internal use of CI.\n- [Object storage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10087): We discovered high storage costs for outdated Postgres backups. We resolved this by enabling bucket lifecycle policies and reduced our object storage for that bucket by 900TB.\n- [Network usage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10222): By correlating a recent change in our spend profile to a network architecture change, we were able to highlight the need for additional changes. We ultimately implemented a change to directly download runner artifacts from GCS instead of having the traffic be proxied. This significantly reduced our overall networking cost.\n\n## 4. Measure business outcomes vs spend\n\nWhen you get to a point for a particular area where you feel like you have done all the basic optimizations and aren't sure where else you could reduce cost without seriously impacting your employees or customers, you have reached stage 4. 
This stage is all about analyzing the value of more complex changes that could reduce spend at the expense of something else, as well as considering the value and cost impact of major feature or architectural changes in the future.\n\n### GitLab example\nOur best example of this was our recent rollout of [advanced global search](https://docs.gitlab.com/ee/user/search/advanced_search.html) to all paid users on GitLab.com. In the first iterations of testing for this feature our costs were exceptionally high. Through a lot of hard work by the team responsible for the feature, they were able to significantly bring down the costs while improving functionality. Through those efforts, GitLab was able to bring this great feature to the platform in a way that also made sense from a business perspective.\n\n## 5. Predict future spend and problem areas\nOnce your company has matured the practices above, you can start to become proactive about observing cost. You can also begin to detect and alert when spend is outside expected thresholds. Once you get to this point, infrastructure optimization should become a boring topic, and when you no longer have any cases of huge unexpected cost increases that were not due to unexpected increases in customer demand, you know you are doing a great job.\n\n### GitLab example\n\nWe’re still working on this stage ourselves. While we’ve had some success in detecting unexpected spend, and even tying it to anomalous behavior in our platform, we recognize we have much more to do here. We are still working to get most of our usage to Stages 3-4, while spending parallel effort to reach Stage 5 for some more mature workloads.\n\n## Current state and next steps\nToday at GitLab, depending on the workload, we are anywhere between stages 1-4. The bulk of the work is going into getting everything to at least stage 2, and from there we can work on getting everything to stages 3-4. 
Current efforts include applying our newly created [infrastructure standards](/handbook/infrastructure-standards/) across all of our infrastructure, bringing in relevant product usage data from our various services, and giving PMs the tools they need to better manage the cost of their services through a single source of truth of base level cost metrics.\n\n## Workflow and planning\nCost optimization is a difficult topic to tackle effectively as it involves many different stakeholders across the business who all have their own priorities. The way we are taking this problem on at GitLab is we have an [issue board](https://gitlab.com/groups/gitlab-com/-/boards/1502173?label_name[]=infrafin) where we plan and track progress on issues related to infrastructure spend. For all the major initiatives we assign priority to these based on four factors:\n\n1.  Cost savings\n2.  Customer impact  \n3.  Future potential cost impact\n4.  Effort required\n  \nThese factors are discussed and reviewed by our analyst, our SaaS offering product manager, and the relevant subject matter expert for the area. Once the priority is agreed upon, the product manager works with various product teams to get these scheduled into milestones or backlog queues for the teams that need to implement the changes. Progress is tracked on the issue board, and reviewed for priority to ensure the solution moves forward at an appropriate velocity.\n\n## More to read\n\nAll of this info and more can be found in our [Cost Management Handbook](/handbook/engineering/infrastructure/cost-management/). 
We continue to improve this page to provide our own employees with the resources they need to understand this topic better, as well as providing external viewers some idea of how they could think about infrastructure optimization in their own company.\n\nYou might also enjoy:\n* [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n* [How we migrated application servers from Unicorn to Puma](/blog/migrating-to-puma-on-gitlab/)\n* [How we upgraded PostgreSQL at GitLab.com](/blog/gitlab-pg-upgrade/)\n\nCover image by [Fabian Blank](https://unsplash.com/@blankerwahnsinn?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,722,1292,789],"collaboration",{"slug":1294,"featured":6,"template":700},"how-we-optimized-our-infrastructure-spend-at-gitlab","content:en-us:blog:how-we-optimized-our-infrastructure-spend-at-gitlab.yml","How We Optimized Our Infrastructure Spend At Gitlab","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab.yml","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"_path":1300,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1301,"content":1307,"config":1312,"_id":1314,"_type":13,"title":1315,"_source":15,"_file":1316,"_stem":1317,"_extension":18},"/en-us/blog/install-gitlab-one-click-gcp-marketplace",{"title":1302,"description":1303,"ogTitle":1302,"ogDescription":1303,"noIndex":6,"ogImage":1304,"ogUrl":1305,"ogSiteName":686,"ogType":687,"canonicalUrls":1305,"schema":1306},"Install GitLab with a single click from the new GCP Marketplace","GitLab is now available on the new Google Cloud Platform Marketplace, so you can deploy GitLab on Google Kubernetes Engine with a single click!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680061/Blog/Hero%20Images/gcp-send-gitlab-large.png","https://about.gitlab.com/blog/install-gitlab-one-click-gcp-marketplace","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Install GitLab with a single click from the new GCP Marketplace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-07-18\",\n      }",{"title":1302,"description":1303,"authors":1308,"heroImage":1304,"date":1309,"body":1310,"category":300,"tags":1311},[1166],"2018-07-18","\nToday, Google Cloud announced its [new Google Cloud Platform (GCP) marketplace](https://cloudplatform.googleblog.com/2018/07/introducing-commercial-kubernetes-applications-in-gcp-marketplace.html) with the ability to deploy applications to your Kubernetes clusters on Google Kubernetes Engine (GKE). We’re proud to make GitLab available in the GCP Marketplace from day one. While you can [install GitLab almost anywhere](/install/), the new GCP Marketpklace app installs with just a single click. It's the easiest way to get your own self-managed GitLab instance up and running.\n\n![Deploy GitLab on Google Cloud Platform](https://about.gitlab.com/images/google-cloud-platform/gcp-send-gitlab-medium.png)\n\n### Not looking to manage your own instance?\n\nFolks who don’t want to take on the overhead of administering their own GitLab instance can [sign up for GitLab.com](https://gitlab.com/users/sign_in). GitLab.com is a SaaS offering that runs the same software as GitLab self-managed, managed by GitLab.\n\nRecently, we announced our [migration from Azure to GCP](/blog/moving-to-gcp/). This migration is the first step in our goal of running GitLab.com as a cloud native application on Kubernetes. The migration has involved careful planning along with decomposing GitLab into individual services. The lessons learned through our migration have translated directly into our how we are building the GitLab Helm Chart. 
The work we’ve done to migrate GitLab.com has fueled our ability to offer a solid option for self-managed users to deploy GitLab to Kubernetes.\n\n### Want to deploy your application to Kubernetes?\n\nWith a built-in container registry and [Kubernetes integration](/solutions/kubernetes/), GitLab makes it easier than ever to get started with containers and cloud native development. [Gitlab CI/CD](/topics/ci-cd/) can deploy your application to any Kubernetes cluster.\n\nIf you don’t have a Kubernetes cluster, we’ve got you covered. The easiest way to get set up in using our [GKE Integration](/partners/technology-partners/google-cloud-platform/) and [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). It takes just a few clicks to set up, then you have a full deployment pipeline. Just commit your code and GitLab does rest.\n\n![GitLab deploys your app to Google Cloud Platform](https://about.gitlab.com/images/google-cloud-platform/gitlab-send-app-medium.png)\n\n#### Join us at Google Next\n\nNext week on July 24-27 we’ll be at [Google Nex](https://cloud.withgoogle.com/next18/sf/)t in San Francisco, where there’s a lot going on. [Follow GitLab on Twitter](https://twitter.com/gitlab) to stay up to date on announcements from the show. If you’re at the show, stop by booth #S1629 and say hi! We’d love to hear how you are using GitLab and show you how our GKE Integration and Marketplace install work.  \n\n#### Summary\n\nYou can use GitLab either as a self-managed app or as a service on GitLab.com. Today, we’ve made it easier than ever to install [GitLab with the GCP Marketplace](https://console.cloud.google.com/marketplace/details/gitlab-public/gitlab?filter=solution-type:k8s). Additionally, we’ll be moving GitLab.com to GCP and soon afterward to GKE. You can look forward to the increased stability and performance that Kubernetes will bring to GitLab.com. 
Regardless of whether you are using self-managed GitLab or GitLab.com, GitLab’s Kubernetes integration and GKE integration make it easy to deploy your app to Kubernetes. Stop by Google Next and follow our Twitter feed to get the latest news on using GitLab together with Google Cloud Platform.\n",[789,810,9,811],{"slug":1313,"featured":6,"template":700},"install-gitlab-one-click-gcp-marketplace","content:en-us:blog:install-gitlab-one-click-gcp-marketplace.yml","Install Gitlab One Click Gcp Marketplace","en-us/blog/install-gitlab-one-click-gcp-marketplace.yml","en-us/blog/install-gitlab-one-click-gcp-marketplace",{"_path":1319,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1320,"content":1326,"config":1332,"_id":1334,"_type":13,"title":1335,"_source":15,"_file":1336,"_stem":1337,"_extension":18},"/en-us/blog/its-time-to-upgrade-docker-engine",{"title":1321,"description":1322,"ogTitle":1321,"ogDescription":1322,"noIndex":6,"ogImage":1323,"ogUrl":1324,"ogSiteName":686,"ogType":687,"canonicalUrls":1324,"schema":1325},"It's time to update Docker Engine","Now that Alpine Linux 3.14 is being used by more images, it's time to upgrade Docker Engine to 20.10.6 or newer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/its-time-to-upgrade-docker-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to update Docker Engine\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomasz Maczukin\"}],\n        \"datePublished\": \"2021-08-26\",\n      }",{"title":1321,"description":1322,"authors":1327,"heroImage":1323,"date":1329,"body":1330,"category":695,"tags":1331},[1328],"Tomasz Maczukin","2021-08-26","\n\n[Alpine Linux](https://alpinelinux.org/) distribution is the base OS used by many Linux container images. 
It provides a handy packaging mechanism, new versions of software, and a quick and predictable release cycle – all while being distributed using a minimal image size. It's used by many very popular container images, for example `docker:dind`,\n[widely used in GitLab CI/CD workloads](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) handling container\nimages building and management in the jobs.\n\nOn June 15, 2021, Alpine Linux released version 3.14. [As documented in the release notes](\nhttps://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2), changes in the `musl` library require\nan updated version of [runc](https://github.com/opencontainers/runc) or updated version of\n[Docker](https://github.com/moby/moby) for the Alpine 3.14-based images to work properly.\n\nSoftware products across the computer industry have started migrating their Alpine Linux-based container images to 3.14 since it includes significant updates for various network and security-oriented use cases. In cases where the GitLab Runner environment uses a Docker version older than 20.10.6 to handle new container images based on Alpine 3.14, CI/CD jobs may encounter unexpected problems during execution and cause jobs to fail.\n\n[We encountered this problem](https://gitlab.com/gitlab-org/gitlab/-/issues/335641) at GitLab a few weeks ago, when\nthe `ruby:2.7` image was migrated to use Alpine Linux 3.14 as the base. We used a quick workaround to unlock our\npipelines by explicitly tagging the Alpine 3.13 version of the image (fortunately, it was provided!). 
To fully\nresolve the problem for all GitLab.com users who use our instance runners, we pushed forward an update to our autoscaled\nVMs base image, which included an update of Docker Engine.\n\n\nOne of the popular and widely used container images that is migrating to Alpine 3.14 [are the `docker` and \n`docker:dind` images](https://github.com/docker-library/docker/pull/317).\nWhat's important is the change will rebuild\nand re-push the existing specific images for supported versions, like `docker:20.10-dind`. This means users\nwho pinned their version of the Docker-in-Docker service in their `.gitlab-ci.yml` files will still get the image\nversion updated to Alpine 3.14. Using a Docker Engine older than 20.10.6 will probably create\nproblems for the user.\n\n## What's the solution?\n\nThe real solution is to upgrade the execution environment accordingly to Alpine's release notes, which state:\n\n> Therefore, Alpine Linux 3.14 requires **at least one** of the following:\n>\n> 1. runc v1.0.0-rc93\n>    - If using Docker's Debian repositories, this is part of containerd.io 1.4.3-2\n>    - If using Docker Desktop for Windows or Mac, this is part of Docker Desktop 3.3.0\n> 1. Docker 20.10.0 (which contains [moby commit a181391](https://github.com/moby/moby/commit/a18139111d8a203bd211b0861c281ebe77daccd9))\nor greater, **AND** libseccomp 2.4.4 (which contains backported [libseccomp commit 5696c89](https://github.com/seccomp/libseccomp/commit/5696c896409c1feb37eb502df33cf36efb2e8e01))\nor greater. In this case, to check if your host libseccomp is faccessat2-compatible, invoke\n`scmp_sys_resolver faccessat2`. If `439` is returned, faccessat2 is supported. If `-1` is returned, faccessat2 is not\nsupported. Note that if runc is older than v1.0.0-rc93, Docker must still be at least version 20.10.0, regardless of\nthe result of this command.\n> 1. 
As a workaround, in order to run under old Docker or libseccomp versions,\n[the moby default seccomp profile](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json) should be\ndownloaded and on line 2, `defaultAction` changed to `SCMP_ACT_TRACE`, then `--seccomp-profile=default.json` can be\npassed to dockerd, or `--security-opt=seccomp=default.json` passed to `docker create` or `docker run`. This will cause\nthe system calls to return ENOSYS instead of EPERM, allowing the container to fall back to faccessat.\n>\n> Note: When using nested Docker, **every layer** must meet one of the above requirements, since if\n**any layer** improperly denies the use of faccessat2, Alpine Linux 3.14 will not function correctly.\n\nThere are several ways to solve this problem, but since they depend on a specific configuration, users need to choose the solution that best matches their environment.\n\nAlthough the [release notes](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2) mentions Docker 20.10.0, (which brings some needed changes), the release notes also mention that the updated version of libseccomp must be used in this case. For environments that use Docker Engine on Linux, these criteria should be met by Docker Engine 20.10.6 and higher.\n\nThe requirement for nested Docker environments (which in case of GitLab CI/CD mostly means\nthe Docker-in-Docker based jobs) to work properly with images based on Alpine Linux 3.14, both the Docker\nEngine on Runner's host **AND** the `docker:dind` image must be updated to at least 20.10.6.\n\nTo summarize:\n\n1. Users **using images** based on Alpine Linux 3.14 for their job execution (read: as the value of `image:` or\n`services:` keywords in `.gitlab-ci.yml`) must update Docker Engine on their hosts to version 20.10.6 or higher.\n\n1. 
Users **building images** based on Alpine Linux 3.14 using the Docker-in-Docker approach (read: using\n`services: [docker:X.Y-dind]` and `script: [..., docker build -t my/image ., ...]` in `.gitlab-ci.yml`) must\nalso update the `docker:dind` image version to `docker:20.10.6-dind` or higher.\n\n**For users of GitLab.com instance-level Runners, the upgrade of Docker Engine was completed a few weeks ago. Still, users likely need to update the used Docker-in-Docker service to `docker:20.10.6-dind` or higher.**\n\n## Some temporary workarounds\n\nSince the update of Docker Engine may not be easy in some environments, the only known workaround is to pin used\nimages to versions using Alpine Linux 3.13. As you can see in the [Docker library issue](https://github.com/docker-library/docker/pull/317#issuecomment-880140631), many projects have already found this\nis a problem for their users and provided the versions of images tagged with `-alpine3.13` suffix.\n\nThe Docker-in-Docker case described in this post [was done quite recently](https://github.com/docker-library/docker/pull/327).\nUsers who can't update the Docker Engine on the Runner host or for Docker-in-Docker can temporarily solve\nthe problem by using for example `services: [docker:19.03.15-dind-alpine3.13]`.\n\nRemember that this is only a temporary solution. For example, the official `docker` image\n[have already abandoned the 19.03 line](https://github.com/docker-library/docker/pull/329) and new images for `19.03.x` will\nnot be released.\n\nThe only real, long-term solution is to plan and maintain the upgrade. 
\n\n",[108,9],{"slug":1333,"featured":6,"template":700},"its-time-to-upgrade-docker-engine","content:en-us:blog:its-time-to-upgrade-docker-engine.yml","Its Time To Upgrade Docker Engine","en-us/blog/its-time-to-upgrade-docker-engine.yml","en-us/blog/its-time-to-upgrade-docker-engine",{"_path":1339,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1340,"content":1345,"config":1350,"_id":1352,"_type":13,"title":1353,"_source":15,"_file":1354,"_stem":1355,"_extension":18},"/en-us/blog/moving-to-gcp",{"title":1341,"description":1342,"ogTitle":1341,"ogDescription":1342,"noIndex":6,"ogImage":801,"ogUrl":1343,"ogSiteName":686,"ogType":687,"canonicalUrls":1343,"schema":1344},"We’re moving from Azure to Google Cloud Platform","GitLab.com is migrating to Google Cloud Platform – here’s what this means for you now and in the future.","https://about.gitlab.com/blog/moving-to-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We’re moving from Azure to Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-06-25\",\n      }",{"title":1341,"description":1342,"authors":1346,"heroImage":801,"date":1347,"body":1348,"category":695,"tags":1349},[951],"2018-06-25","\nUpdate Jul 19, 2018: The latest info can be found in the [GCP migration update](/blog/gcp-move-update/) blog post. \n{: .alert .alert-info}\n\nImproving the performance and reliability of [GitLab.com](/pricing/)  has been a top priority for us. On this front we've made some incremental gains while we've been planning for a large change with the potential to net significant results: moving from Azure to Google Cloud Platform (GCP).\n\nWe believe [Kubernetes](/solutions/kubernetes/) is the future. It's a technology that makes reliability at massive scale possible. 
This is why earlier this year we shipped native [integration with Google Kubernetes Engine](/blog/gke-gitlab-integration/) (GKE) to give GitLab users a simple way to use Kubernetes. Similarly, we've chosen GCP as our cloud provider because of our desire to run GitLab on Kubernetes. Google invented Kubernetes, and GKE has the most robust and mature Kubernetes support. Migrating to GCP is the next step in our plan to make GitLab.com ready for your mission-critical workloads.\n\nOnce the migration has taken place, we’ll continue to focus on bumping up the stability and scalability of GitLab.com, by moving our worker fleet across to Kubernetes using GKE. This move will leverage our [Cloud Native charts](https://gitlab.com/charts/gitlab), which with [GitLab 11.0](/releases/2018/06/22/gitlab-11-0-released/#cloud-native-gitlab-helm-chart-now-beta) are now in beta.\n\n## How we’re preparing for the migration\n\n### Geo\n\nOne GitLab feature we are utilizing for the GCP migration is our [Geo product](https://docs.gitlab.com/ee/administration/geo/).\nGeo allows for full, read-only mirrors of GitLab instances. Besides browsing the GitLab UI, Geo instances can be used for cloning and fetching projects, allowing geographically distributed teams to collaborate more efficiently.\n\nNot only does that allow for disaster recovery in case of an unplanned outage, Geo can also be used for a planned failover to migrate GitLab instances.\n\n![GitLab Geo - Migration](https://about.gitlab.com/images/gitlab_ee/gitlab_geo_diagram_migrate.png){: .medium.center}\n\nFollowing our mantra of dogfooding everything of our product, we are using Geo to move GitLab.com from Microsoft Azure to Google Cloud Platform. Geo is working well and scales because it's been used by many customers reliably since going GA. 
We believe Geo will perform well during the migration and plan this event as another proof point for its value.\n\nRead more about Disaster Recovery with Geo in our [Documentation](https://docs.gitlab.com/ee/administration/geo/disaster_recovery/).\n\n#### The Geo transfer\n\nFor the past few months, we have maintained a Geo secondary site of GitLab.com, called `gprd.gitlab.com`, running on Google Cloud Platform. This secondary keeps an up-to-date synchronized copy of about 200TB of Git data and 2TB of relational data in PostgreSQL. Originally we also replicated Git LFS, File Uploads and other files, but this has since been migrated to Google Cloud Storage object storage, in a parallel effort.\n\nFor logistical reasons, we selected GCP's `us-east1` site in the US state of South Carolina. Our current Azure datacenter is in US East 2, located in Virginia. This is a round-trip distance of 800km, or 3 light-milliseconds. In reality, this translates into a 30ms ping time between the two sites.\n\nBecause of the huge amount of data we need to synchronize between Azure and GCP, we were initially concerned about this additional latency and the risk it might have on our Geo transfer. However, after our initial testing, we realized that network latency and bandwidth were not bottlenecks in the transfer.\n\n### Object storage\n\nIn parallel to the Geo transfer, we are also migrating all file artifacts, including CI Artifacts, Traces (CI log files), file attachments, LFS objects and other file uploads to [Google Cloud Storage](https://cloud.google.com/storage/) (GCS), Google's managed object storage implementation. This has involved moving about 200TB of data off our Azure-based file servers into GCS.\n\nUntil recently, GitLab.com stored these files on NFS servers, with NFS volumes mounted onto each web and API worker in the fleet. NFS is a single-point-of-failure and can be difficult to scale. 
Switching to GCS allows us to leverage its built-in redundancy and multi-region capabilities. This in turn will help to improve our own availability and remove single-points-of-failure from our stack. The object storage effort is part of our longer-term strategy of lifting GitLab.com infrastructure off NFS. The [Gitaly project](https://gitlab.com/gitlab-org/gitaly), a Git RPC service for GitLab, is part of the same initiative. This effort to migrate GitLab.com off NFS is also a prerequisite for our plans to move GitLab.com over to Kubernetes.\n\n### How we're working to ensure a smooth failover\n\nOnce or twice a week, several teams, including [Geo](/handbook/engineering/development/enablement/systems/geo/), [Production](https://about.gitlab.com/handbook/engineering/infrastructure/production/), and [Quality](https://about.gitlab.com/handbook/engineering/quality/), get together to jump onto a video call and conduct a rehearsal of the failover in our staging environment.\n\nLike the production event, the rehearsal takes place from Azure across to GCP. We timebox this event, and carefully monitor how long each phase takes, looking to cut time off wherever possible. 
The failover currently takes two hours, including quality assurance of the failover environment.\n\nThis involves four steps:\n\n- A [preflight checklist](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/preflight_checks.md),\n- The main [failover procedure](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/failover.md),\n- The [test plan](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/test_plan.md) to verify that everything is working, and\n- The [failback procedure](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/failback.md), used to undo the changes so that the staging environment is ready for the next failover rehearsal.\n\nSince these documents are stored as issue templates on GitLab, we can use them to create issues on each successive failover attempt.\n\nAs we run through each rehearsal, new bugs, edge-cases and issues are discovered. We track these issues in the [GitLab Migration tracker](https://gitlab.com/gitlab-com/migration/issues). Any changes to the failover procedure are then made as [merge requests into the issue templates](https://gitlab.com/gitlab-com/migration/merge_requests?scope=all&state=all).\n\nThis process allows us to iterate rapidly on the failover procedure, improving the failover documentation and helping the team build confidence in the procedure.\n\n## When will the migration take place?\n\nOur absolute [top priority](https://gitlab.com/gitlab-com/migration#failover-priorities) for the failover is to ensure that we protect the integrity of our users' data. We will only conduct the failover once we are completely satisfied that all serious issues have been ironed out, that there is no risk of data loss, and that our new environment on Google Cloud Platform is ready for production workloads.\n\nThe failover is currently scheduled for Saturday, July 28, 2018. 
We will follow this post up shortly with further information on the event and will provide plenty of advance notice.\n\nRead the most recent update on [GitLabs journey from Azure to GCP](/blog/gitlab-journey-from-azure-to-gcp/) here!\n",[9,789,810,811],{"slug":1351,"featured":6,"template":700},"moving-to-gcp","content:en-us:blog:moving-to-gcp.yml","Moving To Gcp","en-us/blog/moving-to-gcp.yml","en-us/blog/moving-to-gcp",{"_path":1357,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1358,"content":1364,"config":1369,"_id":1371,"_type":13,"title":1372,"_source":15,"_file":1373,"_stem":1374,"_extension":18},"/en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"title":1359,"description":1360,"ogTitle":1359,"ogDescription":1360,"noIndex":6,"ogImage":1361,"ogUrl":1362,"ogSiteName":686,"ogType":687,"canonicalUrls":1362,"schema":1363},"Provision group runners with Google Cloud Platform and GitLab CI","This tutorial will teach you how to set up a new group runner on GitLab.com using Google Cloud Platform in less than 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098300/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_623844718_4E5Fx1Q0DHikigzCsQWhOG_1750098300048.jpg","https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Provision group runners with Google Cloud Platform and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-19\",\n      }",{"title":1359,"description":1360,"authors":1365,"heroImage":1361,"date":1366,"body":1367,"category":695,"tags":1368},[744,745],"2024-11-19","Are you interested in hosting your own servers to run your GitLab CI/CD\npipelines but don’t know where to begin? 
Setting up a GitLab Runner to run\nyour pipelines on your own infrastructure can seem like a daunting task as\nit requires infrastructure knowledge and the know-how to maintain that\ninfrastructure. Typically this process requires the provision of\ninfrastructure, the installing of dependency, and testing that it works with\nyour GitLab instance.\n\n\nThis article highlights how easy it is to easily spin up a GitLab Runner of\nyour own utilizing GitLab’s Google Cloud Integration. Follow this tutorial\nand it will teach you how to set up a new group runner on GitLab.com using\nGoogle Cloud Platform in less than 10 minutes!\n\n\nYou will learn how to:\n\n\n- Create a new group runner.\n\n- Configure the new group runner’s tags and description.\n\n- Register the new group runner by adding in configurations.\n\n- Provision the GitLab Runner utilizing `gcloud cli` and Terraform.\n\n- Have your GitLab Runner pick up its first GitLab CI job.\n\n\n## Prerequisites\n\n- A terminal with Bash installed\n\n- Owner access on a Google Cloud Platform project\n\n- Terraform (or OpenTofu) [Version\n1.5](https://releases.hashicorp.com/terraform/1.5.7/) or greater \n\n- [gcloud CLI](https://cloud.google.com/sdk/docs/install) \n\n- 10 minutes\n\n\n## Tutorial\n\n1. Create a new group runner under __Build > Runners > New Group Runner__.\n\n\n__Note:__ Navigate to the group level.\n\n\n![GitLab Runner setup\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098317126.png)\n\n\n2. Configure the new group runner's tags, description, and any additional\nconfigurations.\n\n\n![New Group Runner\nsetup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098317127.png)\n\n\n3. 
Select __Google Cloud__.\n\n\n![Select Google Cloud\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098317129.png)\n\n\n4. Copy your project ID from Google Cloud Platform.\n\n\n![Copy project ID from GCP\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098317131.png)\n\n\n5. Fill out your Google Cloud project ID and choose a region, zone, and type\nof machine you want to use.\n\n\n![Screen to fill out Google Cloud\ninformation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098317132.png)\n\n\n6\\. Once this information is filled out, click **Setup instructions**.\n\n\nRun the bash script provided in Step 1 above.\n\n\n**Note:** This script was saved to a file called `setup.sh` for ease of use.\nYou may copy this right into your terminal if you are confident in\ndebugging.\n\n\n![Setup instructions\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098317134.png)\n\n\n![Script for GitLab\nRunner](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098317135.png)\n\n\n7\\. Create a `main.tf` file and follow the instructions in GitLab.\n\n\n**Note:** If you want to use OpenTofu instead of Terraform, you can still\ncopy the code and only have to adjust the Terraform commands for applying\nthe configuration. 
\n\n\n![Install and register GitLab Runner\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098317136.png)\n\n\nOnce successfully provisioned, you should be see the following:\n\n\n![GitLab Runner\ncode](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098317137.png)\n\n\n8\\. If you close the instructions and click the **View runners** button, you\nwill now have a newly provisioned runner present with \"Never contacted\" as\nits status.\n\n\n![Newly provisioned runner on\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098317139.png)\n\n\n9\\. In any project, add the following `.gitlab-ci.yml`.\n\n\n```  \n\nstages:  \n  - greet\n\nhello_job:  \n  stage: greet  \n  tags:  \n    - gcp-runner  \n  script:  \n    - echo \"hello\"  \n```\n\n\nVolia! You have set up your first GitLab Runner utilizing Google Cloud\nPlatform.\n\n\n# Next steps\n\n\nNow that you have provisioned your very own GitLab Runner, consider\noptimizing it for your specific use case. Some things to consider with your\nrunner moving forward:\n\n\n- Is the runner I provisioned the right size? Does it need additional\nresources for my use case? \n\n- Does the GitLab Runner contain all the dependency my builds need?  
\n\n- How can I store the GitLab Runner as infrastructure as code?\n\n\n> Make sure to bookmark the [Provisioning runners in Google Cloud\ndocumentation](https://docs.gitlab.com/ee/ci/runners/provision_runners_google_cloud.html)\nfor easy reference.\n",[723,495,721,108,749,9,232],{"slug":1370,"featured":6,"template":700},"provision-group-runners-with-google-cloud-platform-and-gitlab-ci","content:en-us:blog:provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","Provision Group Runners With Google Cloud Platform And Gitlab Ci","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"_path":1376,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1377,"content":1382,"config":1387,"_id":1389,"_type":13,"title":1390,"_source":15,"_file":1391,"_stem":1392,"_extension":18},"/en-us/blog/running-a-consistent-serverless-platform",{"title":1378,"description":1379,"ogTitle":1378,"ogDescription":1379,"noIndex":6,"ogImage":1140,"ogUrl":1380,"ogSiteName":686,"ogType":687,"canonicalUrls":1380,"schema":1381},"Run a consistent serverless platform with GitLab and Knative","Portability of your serverless platform is now easy with GitLab and Knative.","https://about.gitlab.com/blog/running-a-consistent-serverless-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Run a consistent serverless platform with GitLab and Knative\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Gruesso\"}],\n        \"datePublished\": \"2019-05-02\",\n      }",{"title":1378,"description":1379,"authors":1383,"heroImage":1140,"date":1028,"body":1385,"category":300,"tags":1386},[1384],"Daniel Gruesso","\nThis past April, [Cloud Run](https://cloud.google.com/run/) was announced at Google Cloud Next. 
As a Google Cloud partner, GitLab had the opportunity to participate and demo our integration during the talk titled, \"[Run a consistent serverless platform anywhere with Kubernetes and Knative](https://youtu.be/lb_bRRAgEyc?t=1100).\"\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/lb_bRRAgEyc?start=1100\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nJust as Kubernetes has become the de facto default platform for running containers, Knative is shaping up to become the answer for running [serverless](/topics/serverless/) workloads in Kubernetes. Cloud Run brings all the benefits of Knative in a fully managed service or as an add-on to your Kubernetes cluster (called “Cloud Run on GKE”), abstracting developers from the complexities of deploying Kubernetes, Knative, and managing a cluster. This empowers developers to focus on adding value vs having to deploy and manage infrastructure.\n\nAt GitLab we believe in the power of open source and adopted Kubernetes and Knative from early on. During the talk, we demoed how GitLab enables operators to deploy Knative with ease so that developers can start deploying Functions-as-a-service (FaaS) or serverless applications using GitLab’s built-in features. GitLab also provides the configured Istio-Ingress endpoints automatically, which operators can then use to configure DNS for their domain, as well as providing the option to bind the domain to the ingress endpoint (via ConfigMap) so that the serving controller can configure the routes. 
This is all done with a single click.\n\nAfter provisioning your project with the required [serverless templates](https://docs.gitlab.com/ee/update/removals.html), GitLab will automatically build and deploy your application or function as a Knative service, provide you with the endpoint where the service is provisioned, and display load/invocation metrics for your function.\n\n![GitLab Serverless](https://docs.gitlab.com/ee/update/removals.html){: .shadow.small.center.wrap-text}\n\nWhile it’s still early on, we are very excited to partner with both Google Cloud and the Knative community to bring all this awesome functionality to the GitLab community.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nLearn more about [GitLab Serverless](https://docs.gitlab.com/ee/user/project/clusters/serverless)\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nLearn more about [Cloud Run](http://cloud.google.com/run)\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n",[789,278,810,9,811],{"slug":1388,"featured":6,"template":700},"running-a-consistent-serverless-platform","content:en-us:blog:running-a-consistent-serverless-platform.yml","Running A Consistent Serverless 
Platform","en-us/blog/running-a-consistent-serverless-platform.yml","en-us/blog/running-a-consistent-serverless-platform",{"_path":1394,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1395,"content":1401,"config":1409,"_id":1411,"_type":13,"title":1412,"_source":15,"_file":1413,"_stem":1414,"_extension":18},"/en-us/blog/tracking-down-missing-tcp-keepalives",{"title":1396,"description":1397,"ogTitle":1396,"ogDescription":1397,"noIndex":6,"ogImage":1398,"ogUrl":1399,"ogSiteName":686,"ogType":687,"canonicalUrls":1399,"schema":1400},"Tracking TCP Keepalives: Lessons in Docker, Golang & GitLab","An in-depth recap of debugging a bug in the Docker client library.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680874/Blog/Hero%20Images/network.jpg","https://about.gitlab.com/blog/tracking-down-missing-tcp-keepalives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2019-11-15\",\n      }",{"title":1402,"description":1397,"authors":1403,"heroImage":1398,"date":1405,"body":1406,"category":695,"tags":1407},"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab",[1404],"Stan Hu","2019-11-15","This blog post was originally published on the GitLab Unfiltered\nblog. It was reviewed and republished on\n2019-12-03.\n\n{: .alert .alert-info .note}\n\n\nWhat began as failure in a GitLab static analysis check led to a\n\ndizzying investigation that uncovered a subtle [bug in the Docker client\n\nlibrary code](https://github.com/docker/for-linux/issues/853) used by\n\nthe GitLab Runner. 
We ultimately worked around the problem by upgrading\n\nthe Go compiler, but in the process we uncovered an unexpected change in\n\nthe Go TCP keepalive defaults that fixed an issue with Docker and GitLab\n\nCI.\n\n\nThis investigation started on October 23, when backend engineer [Luke\n\nDuncalfe](/company/team/#.luke) mentioned, \"I'm seeing\n\n[`static-analysis` failures with no\noutput](https://gitlab.com/gitlab-org/gitlab/-/jobs/331174397).\n\nIs there something wrong with this job?\" He opened [a GitLab\n\nissue](https://gitlab.com/gitlab-org/gitlab/issues/34951) to discuss.\n\n\nWhen Luke ran the static analysis check locally on his laptop, he saw\n\nuseful debugging output when the test failed. For example, an extraneous\n\nnewline would accurately be reported by Rubocop. However, when the same\n\ntest ran in GitLab's automated test infrastructure, the test failed\n\nquietly:\n\n\n![Failed\njob](https://about.gitlab.com/images/blogimages/docker-tcp-keepalive-debug/job-failure.png){:\n.shadow.center}\n\n\nNotice how the job log did not include any clues after the `bin/rake\n\nlint:all` step. This made it difficult to determine whether a real\n\nproblem existed, or whether this was just a flaky test.\n\n\nIn the ensuing days, numerous team members reported the same problem.\n\nNothing kills productivity like silent test failures.\n\n\n## Was something wrong with the test itself?\n\n\nIn the past, we had seen that if that specific test generated enough\n\nerrors, [the output buffer would fill up, and the continuous integration\n\n(CI) job would lock\n\nindefinitely](https://gitlab.com/gitlab-org/gitlab-foss/issues/61432). We\n\nthought we had [fixed that issue months\n\nago](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/28402). Upon\n\nfurther review, that fix seemed to eliminate any chance of a thread\n\ndeadlock.\n\n\nDid we have to flush the buffer? 
No, because the Linux kernel will do\n\nthat for an exiting process already.\n\n\n## Was there a change in how CI logs were handled?\n\n\nWhen a test runs in GitLab CI, the [GitLab\n\nRunner](https://gitlab.com/gitlab-org/gitlab-runner/) launches a Docker\n\ncontainer that runs commands specified by a `.gitlab-ci.yml` inside the\n\nproject repository. As the job runs, the runner streams the output to\n\nthe GitLab API via PATCH requests. The GitLab backend saves this data\n\ninto a file. The following sequence diagram shows how this works:\n\n\n```plantuml\n\n== Get a job! ==\n\nRunner -> GitLab: POST /api/v4/jobs/request\n\nGitLab -> Runner: 201 Job was scheduled\n\n\n== Job sends logs (1 of 2) ==\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> File: Save to disk\n\nGitLab -> Runner: 202 Accepted\n\n\n== Job sends logs (2 of 2) ==\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> File: Save to disk\n\nGitLab -> Runner: 202 Accepted\n\n```\n\n\n[Henrich Lee Yu](/company/team/#engwan) mentioned\n\nthat we had recently [disabled a feature flag that changed how GitLab\n\nhandled CI job\n\nlogs](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture).\n[The\n\ntiming seemed to line\n\nup](https://gitlab.com/gitlab-org/gitlab/issues/34951#note_236723888).\n\n\nThis feature, called live CI traces, eliminates the need for a shared\n\nPOSIX filesystem (e.g., NFS) when saving job logs to disk by:\n\n\n1. Streaming data into memory via Redis\n\n2. Persisting the data in the database (PostgreSQL)\n\n3. Archiving the final data into object storage\n\n\nWhen this flag is enabled, the flow of CI job logs looks something like\n\nthe following:\n\n\n```plantuml\n\n== Get a job! 
==\n\nRunner -> GitLab: POST /api/v4/jobs/request\n\nGitLab -> Runner: 201 Job was scheduled\n\n\n== Job sends logs ==\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> Redis: Save chunk\n\nGitLab -> Runner: 202 Accepted\n\n...\n\n== Copy 128 KB chunks from Redis to database ==\n\nGitLab -> Redis: GET gitlab:ci:trace:id:chunks:0\n\nGitLab -> PostgreSQL: INSERT INTO ci_build_trace_chunks\n\n...\n\n== Job finishes ==\n\n\nRunner -> GitLab: PUT /api/v4/job/:id\n\nGitLab -> Runner: 200 Job was updated\n\n\n== Archive trace to object storage ==\n\n```\n\n\nLooking at the flow diagram above, we see that this approach has more\n\nsteps. After receiving data from the runner, something could have gone\n\nwrong with handling a chunk of data. However, we still had many\n\nquestions:\n\n\n1. Did the runners send the right data in the first place?\n\n1. Did GitLab drop a chunk of data somewhere?\n\n1. Did this new feature actually have anything to do with the problem?\n\n1. Are they really making another Gremlins movie?\n\n\n## Reproducing the bug: Simplify the `.gitlab-ci.yml`\n\n\nTo help answer those questions, we simplified the `.gitlab-ci.yml` to\n\nrun only the `static-analysis` step. We inserted a known Rubocop error,\n\nreplacing a `eq` with `eql`. We first ran this test on a separate GitLab\n\ninstance with a private runner. No luck there – the job showed the right\n\noutput:\n\n\n```\n\nOffenses:\n\n\nee/spec/models/project_spec.rb:55:42: C: RSpec/BeEql: Prefer be over eql.\n        expect(described_class.count).to eql(2)\n                                         ^^^\n\n12669 files inspected, 1 offense detected\n\n```\n\n\nHowever, we repeated the test on our staging server and found that we\n\nreproduced the original problem. In addition, the live CI trace feature\n\nflag had been activated on staging. 
Since the problem occurred with and\n\nwithout the feature, we could eliminate that feature as a possible\n\ncause.\n\n\nPerhaps something with the GitLab server environment caused a\n\nproblem. For example, could the load balancers be rate-limiting the\n\nrunners? As an experiment, we pointed a private runner at the staging\n\nserver and re-ran the test. This time, it succeeded: the output was\n\nshown. That seemed to suggest that the problem had more to do with the\n\nrunner than with the server.\n\n\n## Docker Machine vs. Docker\n\n\nOne key difference between the two tests: One runner used a shared,\n\nautoscaled runner using a [Docker\n\nMachine](https://docs.docker.com/machine/overview/) executor, and the\n\nprivate runner used a [Docker\n\nexecutor](https://docs.gitlab.com/runner/executors/docker.html).\n\n\nWhat does Docker Machine do exactly? The following diagram may help\n\nillustrate:\n\n\n![Docker Machine](https://docs.docker.com/machine/img/machine.png){:\n.medium.center}\n\n\nThe top-left shows a local Docker instance. When you run Docker from the\n\ncommand-line interface (e.g., `docker attach my-container`), the program\n\njust makes [REST calls to the Docker Engine\n\nAPI](https://docs.docker.com/engine/api/v1.40/).\n\n\nThe rest of the diagram shows how Docker Machine fits into the\n\npicture. Docker Machine is an entirely separate program. The GitLab\n\nRunner shells out to `docker-machine` to create and destroy virtual\n\nmachines using cloud-specific (e.g. Amazon, Google, etc.) drivers. Once\n\na machine is running, the runner then uses the Docker Engine API to run,\n\nwatch, and stop containers.\n\n\nNote that this API is used securely over an HTTPS connection. 
This is an\n\nimportant difference between the Docker Machine executor and Docker\n\nexecutor: The former needs to communicate across the network, while the\n\nlatter can either use a local TCP socket or UNIX domain socket.\n\n\n## Google Cloud Platform timeouts\n\n\nWe've known for a while that Google Cloud [has a 10-minute idle\n\ntimeout](https://cloud.google.com/compute/docs/troubleshooting/general-tips),\n\nwhich has caused issues in the past:\n\n\n> Note that idle connections are tracked for a maximum of 10 minutes,\n\n> after which their traffic is subject to firewall rules, including the\n\n> implied deny ingress rule. If your instance initiates or accepts\n\n> long-lived connections with an external host, you should adjust TCP\n\n> keep-alive settings on your Compute Engine instances to less than 600\n\n> seconds to ensure that connections are refreshed before the timeout\n\n> occurs.\n\n\nWas the problem caused by this timeout? With the Docker Machine\n\nexecutor, we found that we could reproduce the problem with a simple\n\n`.gitlab-ci.yml`:\n\n\n```yaml\n\nimage: \"busybox:latest\"\n\n\ntest:\n  script:\n    - date\n    - sleep 601\n    - echo \"Hello world!\"\n    - date\n    - exit 1\n```\n\n\nThis would reproduce the failure, where we would never see the `Hello\n\nworld!` output. Changing the `sleep 601` to `sleep 599` would make the\n\nproblem go away. Hurrah! All we have to do is tweak the system TCP\n\nkeepalives, right? Google provided these sensible settings:\n\n\n```sh\n\nsudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60\nnet.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5\n\n```\n\n\nHowever, enabling these kernel-level settings didn't solve the\n\nproblem. Were keepalives even being sent? 
Or was there some other issue?\n\nWe turned our attention to network traces.\n\n\n## Eavesdropping on Docker traffic\n\n\nIn order to understand what was happening, we needed to be able to\n\nmonitor the network communication between the runner and the Docker\n\ncontainer. But how exactly does the GitLab Runner stream data from a\n\nDocker container to the GitLab server?  The following diagram\n\nillustrates the flow:\n\n\n```plantuml\n\nRunner -> Docker: POST /containers/name/attach\n\nDocker -> Runner: \u003Ccontainer output>\n\nDocker -> Runner: \u003Ccontainer output>\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> File: Save to disk\n\nGitLab -> Runner: 202 Accepted\n\n```\n\n\nFirst, the runner makes a [POST request to attach to the container\n\noutput](https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach).\n\nAs soon as a process running in the container outputs some data, Docker\n\nwill transmit the data over this HTTPS stream. The runner then copies\n\nthis data to GitLab via the PATCH request.\n\n\nHowever, as mentioned earlier, traffic between a GitLab Runner and the\n\nremote Docker machine is encrypted over HTTPS on port 2376. Was there an\n\neasy way to disable HTTPS? Searching through the code of Docker Machine,\n\nwe found that it did not appear to be supported out of the box.\n\n\nSince we couldn't disable HTTPS, we had two ways to eavesdrop:\n\n\n1. Use a man-in-the-middle proxy (e.g. [mitmproxy](https://mitmproxy.org/))\n\n1. 
Record the traffic and decrypt the traffic later using the private keys\n\n\n## Ok, let's be the man-in-the-middle!\n\n\nThe first seemed more straightforward, since [we already had experience\n\ndoing this with the Docker\n\nclient](https://docs.gitlab.com/ee/administration/packages/container_registry.html#running-the-docker-daemon-with-a-proxy).\n\n\nHowever, after [defining the proxy variables for GitLab\n\nRunner](https://docs.gitlab.com/runner/configuration/proxy.html#adding-proxy-variables-to-the-runner-config),\n\nwe found we were only able to intercept the GitLab API calls with\n\n`mitmproxy`. The Docker API calls still went directly to the remote\n\nhost. Something wasn't obeying the proxy configuration, but we didn't\n\ninvestigate further. We tried the second approach.\n\n\n## Decrypting TLS data\n\n\nTo decrypt TLS data, we would need to obtain the encryption keys. Where\n\nwere these located for a newly-created system with `docker-machine`? It\n\nturns out `docker-machine` worked in the following way:\n\n\n1. Call the Google Cloud API to create a new machine\n\n1. Create a `/root/.docker/machine/machines/:machine_name` directory\n\n1. Generate a new SSH keypair\n\n1. Install the SSH key on the server\n\n1. Generate a new TLS certificate and key\n\n1. Install and configure Docker on the newly-created machine with TLS\ncertificates\n\n\nAs long as the machine runs, the directory will contain the information\n\nneeded to decode this traffic. We ran `tcpdump` and saved the private keys.\n\n\nOur first attempt at decoding the traffic failed. Wireshark could not\n\ndecode the encrypted traffic, although general TCP traffic could still\n\nbe seen. Researching more, we found out why: If the encrypted traffic\n\nused a [Diffie-Hellman key\n\nexchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange),\n\nhaving the private keys would not suffice! 
This is by design, a property\n\ncalled [perfect forward\n\nsecrecy](https://en.m.wikipedia.org/wiki/Forward_secrecy).\n\n\nTo get around that limitation, we modified the GitLab Runner to disable\n\ncipher suites that used the Diffie-Hellman key exchange:\n\n\n```diff\n\ndiff --git\na/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go\nb/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go\n\nindex 6b4c6a7c0..a3f86d756 100644\n",[268,725,549,721,9,721,1408,855,697],"AWS",{"slug":1410,"featured":6,"template":700},"tracking-down-missing-tcp-keepalives","content:en-us:blog:tracking-down-missing-tcp-keepalives.yml","Tracking Down Missing Tcp Keepalives","en-us/blog/tracking-down-missing-tcp-keepalives.yml","en-us/blog/tracking-down-missing-tcp-keepalives",{"_path":1416,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1417,"content":1423,"config":1429,"_id":1431,"_type":13,"title":1432,"_source":15,"_file":1433,"_stem":1434,"_extension":18},"/en-us/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab",{"title":1418,"description":1419,"ogTitle":1418,"ogDescription":1419,"noIndex":6,"ogImage":1420,"ogUrl":1421,"ogSiteName":686,"ogType":687,"canonicalUrls":1421,"schema":1422},"Tutorial: Migrate from Google Cloud Source Repositories to GitLab","Google Cloud is deprecating Cloud Source Repositories. 
Learn how to migrate a CSR source code repository to GitLab, along with best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097739/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2813%29_1zdtbfPDHZVe6JC2AbdHmb_1750097738370.png","https://about.gitlab.com/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Migrate from Google Cloud Source Repositories to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsukasa Komatsubara\"},{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2024-08-28\",\n      }",{"title":1418,"description":1419,"authors":1424,"heroImage":1420,"date":1426,"body":1427,"category":1107,"tags":1428},[1425,763],"Tsukasa Komatsubara","2024-08-28","Google Cloud’s [deprecation of Cloud Source\nRepositories](https://cloud.google.com/source-repositories/docs/release-notes)\n(CSR) has prompted development teams to seek a full-featured alternative for\ntheir source code repositories. GitLab, a [Google Cloud Technology\nPartner](https://cloud.google.com/find-a-partner/partner/gitlab-inc), is a\nstrong choice due to its comprehensive DevSecOps capabilities.\n\n\nIn this tutorial, you'll learn the steps to ensure a smooth transition from\nCSR to GitLab, whether you're using GitLab.com or a self-managed instance on\nGoogle Cloud.\n\n\n## Why GitLab?\n\nTransitioning from Google Cloud Source Repositories to GitLab is a\nrecommended step. As a strategic partner of Google Cloud, GitLab seamlessly\nintegrates with existing infrastructure with ease and brings value to\ncustomers in the following ways:\n\n- **Unified DevSecOps platform**\n    - Consolidate your entire development lifecycle into a single application, from planning to monitoring. 
Eliminate tool sprawl and dramatically boost productivity.\n- **Seamless Google Cloud integration**\n    - Effortlessly connect with GKE, Cloud Build, and Cloud Storage, ensuring a smooth migration and efficient operations within the Google Cloud ecosystem.\n- **Advanced CI/CD capabilities**\n    - Leverage [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) to automate everything from security scanning to deployment, accelerating your development cycles.\n- **Industry-recognized AI coding assistance**\n    - Benefit from built-in AI-assisted development with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), fostering a secure and efficient coding environment.\n\n## Prerequisites\n\n\nBefore you start the migration, ensure you have:\n\n- GitLab account: Set up your account on GitLab.com or on a self-hosted\ninstance.\n\n- GitLab project: Create a blank project in GitLab where the CSR repository\nwill be migrated.\n\n\n## Migration steps\n\n\n1. Create a blank GitLab project: This will serve as the destination for\nyour migrated CSR repository. Keep this project empty for now.\n\n2. Generate a personal access token (PAT): Navigate to GitLab settings and\n[generate a\nPAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\nwith `read_repository` and `write_repository` scopes enabled. This token\nwill be used to authenticate your Git operations during the migration\nprocess.\n\n3. Edit code in Cloud Shell Editor: From your CSR repository, open the Cloud\nShell Editor by clicking the “Edit code” button. You’ll need to authorize\nthe Cloud Shell and select “Trust repo” to proceed.\n\n\n![Google Cloud Shell\nEditor](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097750/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097750517.png)\n\n\n4. 
Inspect Git status: Run `git status` in the Cloud Shell to check the\ncurrent branch and ensure everything is in order before pushing to GitLab.\n\n\n![Inspect Git\nstatus](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097750/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097750518.png)\n\n\n5. Set Up the Remote Repository: Add your GitLab project as a remote\nrepository by running:\n\n\n```\n\ngit remote add origin [GITLAB_PROJECT_URL]\n\n\n```\n\n\n6. Replace `[GITLAB_PROJECT_URL]` with the actual URL of your GitLab\nproject.\n\nPush to GitLab: Finally, push your local repository to GitLab by running: \n\n\n```\n\ngit push -u origin [BRANCH_NAME]\n\n\n```\n\n\n7. Replace `[BRANCH_NAME]` with the current branch name you noted earlier.\n\nWhen prompted, use your GitLab username and the PAT as the password to\nauthenticate and complete the push.\n\n\n## Best practices\n\n\n- Back up before you begin: Always back up your CSR repository before\nstarting the migration process.\n\n- Test after migration: Ensure all aspects of the repository, including\nbranches and CI/CD pipelines, are functioning as expected in GitLab.\n\n- Leverage GitLab features: Take advantage of GitLab’s advanced DevSecOps\nfeatures such as [AI](https://about.gitlab.com/gitlab-duo/),\n[CI/CD](https://docs.gitlab.com/ee/ci/), and [Enterprise Agile\nplanning](https://about.gitlab.com/solutions/agile-delivery/) to enhance\nyour development workflow.\n\n\nMoving from Google Cloud Source Repositories to GitLab is easy and offers\nmore benefits than just managing source code. 
GitLab, with its integration\nwith Google Cloud, makes it an ideal choice for developers seeking to\nenhance their workflow post-migration.\n\n\n> Read more about [GitLab's integration with Google\nCloud](https://about.gitlab.com/blog/gitlab-google-cloud-integrations-now-in-public-beta/).\n",[723,9,495],{"slug":1430,"featured":6,"template":700},"tutorial-migrate-from-google-cloud-source-repositories-to-gitlab","content:en-us:blog:tutorial-migrate-from-google-cloud-source-repositories-to-gitlab.yml","Tutorial Migrate From Google Cloud Source Repositories To Gitlab","en-us/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab.yml","en-us/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab",{"_path":1436,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1437,"content":1443,"config":1449,"_id":1451,"_type":13,"title":1452,"_source":15,"_file":1453,"_stem":1454,"_extension":18},"/en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab",{"title":1438,"description":1439,"ogTitle":1438,"ogDescription":1439,"noIndex":6,"ogImage":1440,"ogUrl":1441,"ogSiteName":686,"ogType":687,"canonicalUrls":1441,"schema":1442},"Tutorial: Secure BigQuery data publishing with GitLab ","Learn how to create repeatable, auditable, and efficient processes for automating and securing BigQuery data exports.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659756/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images.png","https://about.gitlab.com/blog/tutorial-secure-bigquery-data-publishing-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Secure BigQuery data publishing with GitLab \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2025-03-25\",\n      
}",{"title":1438,"description":1439,"authors":1444,"heroImage":1440,"date":1445,"body":1446,"category":695,"tags":1447},[763],"2025-03-25","GitLab offers a powerful solution for automating and securing\n[BigQuery](https://cloud.google.com/bigquery) data exports. This integration\ntransforms manual exports into repeatable, auditable processes that can\neliminate security vulnerabilities while saving valuable time. This tutorial\nexplains how to implement this solution so you can quickly reduce manual\noperations, permission issues, and security concerns with just a few lines\nof GitLab YAML code.\n\n\nFollow along with this step-by-step video:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gxXX-ItAreo?si=FijY9wMVppCW-18q\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## The solution architecture\n\n\nOur solution leverages GitLab CI/CD pipelines to automate the secure export\nof data from BigQuery to Google Cloud Storage. Here's the high-level\narchitecture:\n\n\n1. SQL code is stored and version-controlled in GitLab.  \n\n2. After code review and approval, GitLab CI/CD pipeline executes the\ncode.  \n\n3. The pipeline authenticates with Google Cloud.  \n\n4. SQL queries are executed against BigQuery.  \n\n5. Results are exported as CSV files to Google Cloud Storage.  \n\n6. Secure links to these files are provided for authorized consumption.\n\n\n## Prerequisites\n\n\nBefore we begin, ensure you have:\n\n\n* **Google Cloud APIs enabled:** BigQuery API and Cloud Storage API  \n\n* **Service account** with appropriate permissions:  \n  * BigQuery Job User  \n  * Storage Admin  \n  * **Note:** For this demo, we're using the service account approach for authentication, which is simpler to set up. For production environments, you might consider using GitLab's identity and access management integration with Google Cloud. 
This integration leverages Workload Identity Federation, which provides enhanced security and is more suitable for enterprise customers and organizations.  \n* **GitLab project** ready to store your SQL code and pipeline configuration\n\n\n## Step-by-step implementation\n\n\n**1. Configure Google Cloud credentials.**\n\n\nFirst, set up the necessary environment variables in your GitLab project:\n\n\n- Go to your **GitLab project > Settings > CI/CD**.  \n\n- Expand the **Variables** section.  \n\n- Add the following variables:  \n   * `CLOUD_STORAGE_BUCKET`: Your Google Cloud Storage bucket name  \n   * `PROJECT_ID`: Your Google Cloud project ID  \n   * `SERVICE_ACCOUNT_KEY`: Base64-encoded service account key (mark as masked)\n\n**2. Create your SQL query.**\n\n\nCreate a file named `query.sql` in your GitLab repository with your BigQuery\nSQL query. The query looks like this:\n\n\n```\n\n-- This query shows a list of the daily top Google Search terms.\n\nSELECT\n   refresh_date AS Day,\n   term AS Top_Term,\n       -- These search terms are in the top 25 in the US each day.\n   rank,\nFROM `bigquery-public-data.google_trends.top_terms`\n\nWHERE\n   rank = 1\n       -- Choose only the top term each day.\n   AND refresh_date >= DATE_SUB(CURRENT_DATE(), INTERVAL 2 WEEK)\n       -- Filter to the last 2 weeks.\nGROUP BY Day, Top_Term, rank\n\nORDER BY Day DESC\n   -- Show the days in reverse chronological order.\n\n```\n\n\nThis query gets the top Google Trends search term for each day of the last\ntwo weeks.\n\n\n**3. 
Configure the GitLab CI/CD pipeline.**\n\n\nCreate a `.gitlab-ci.yml` file in your repository root:\n\n\n```\n\nimage: google/cloud-sdk:alpine\n\n\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n\nexecute:\n  stage: deploy\n  script: \n    # Set up Google Cloud authentication and install necessary components\n    - export GOOGLE_CLOUD_CREDENTIALS=$(echo $SERVICE_ACCOUNT_KEY | base64 -d)\n    - echo $GOOGLE_CLOUD_CREDENTIALS > service-account-key.json \n    - gcloud auth activate-service-account --key-file service-account-key.json \n    - gcloud components install gsutil\n    # Set the active Google Cloud project\n    - gcloud config set project $PROJECT_ID\n    # Run the BigQuery query and export the results to a CSV file\n    - bq query --format=csv --use_legacy_sql=false \u003C query.sql > results.csv\n    # Create a Google Cloud Storage bucket if it doesn't exist\n    - gsutil ls gs://${CLOUD_STORAGE_BUCKET} || gsutil mb gs://${CLOUD_STORAGE_BUCKET}\n    # Upload the CSV file to the storage bucket\n    - gsutil cp results.csv gs://${CLOUD_STORAGE_BUCKET}/results.csv\n    # Set the access control list (ACL) to make the CSV file publicly readable\n    - gsutil acl ch -u AllUsers:R gs://${CLOUD_STORAGE_BUCKET}/results.csv\n    # Define the static URL for the CSV file\n    - export STATIC_URL=\"https://storage.googleapis.com/${CLOUD_STORAGE_BUCKET}/results.csv\"\n    # Display the static URL for the CSV file\n    - echo \"File URL = $STATIC_URL\"\n\n```\n\n\n**4. 
Run the pipeline.**\n\n\nNow, whenever changes are merged to your main branch, the pipeline will\nprovide a link to the CSV file stored on the Google Cloud Storage bucket.\nThis file contains the result of the executed SQL query that GitLab subjects\nto security checks.\n\n\n## Benefits of this approach\n\n\n* **Security:** Authentication is handled automatically via service accounts\n(or Workload Identity Federation for enhanced security in production\nenvironments).  \n\n* **Auditability:** All data exports are tracked through GitLab commits and\npipeline logs.  \n\n* **Repeatability:** Consistent, predictable export process on every run,\nand can be scheduled.  \n\n* **Version control:** SQL queries are properly versioned and reviewed.  \n\n* **Automation:** Significantly fewer manual exports, reducing human error.\n\n\n## Try it today\n\n\nBy combining GitLab's DevSecOps capabilities with Google Cloud's BigQuery\nand Cloud Storage, you've now automated and secured your data publishing\nworkflow. 
This approach reduces manual operations, resolves permission\nheadaches, and addresses security concerns – all achieved with just a few\nlines of GitLab CI code.\n\n\n> Use this tutorial's [complete code\nexample](https://gitlab.com/gitlab-partners-public/google-cloud/demos/big-query-data-publishing)\nto get started now.\n",[726,495,723,1448,232,9],"workflow",{"slug":1450,"featured":90,"template":700},"tutorial-secure-bigquery-data-publishing-with-gitlab","content:en-us:blog:tutorial-secure-bigquery-data-publishing-with-gitlab.yml","Tutorial Secure Bigquery Data Publishing With Gitlab","en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab.yml","en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab",{"_path":1456,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1457,"content":1463,"config":1468,"_id":1470,"_type":13,"title":1471,"_source":15,"_file":1472,"_stem":1473,"_extension":18},"/en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"title":1458,"description":1459,"ogTitle":1458,"ogDescription":1459,"noIndex":6,"ogImage":1460,"ogUrl":1461,"ogSiteName":686,"ogType":687,"canonicalUrls":1461,"schema":1462},"Utilize the GitLab DevOps platform to avoid cloud migration hazards","The GitLab modern DevOps platform can simplify and accelerate planning, managing, moving, and modernizing applications and infrastructure as companies adopt a cloud-first posture on AWS and Google Cloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665811/Blog/Hero%20Images/daytime-clouds.jpg","https://about.gitlab.com/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Utilize the GitLab DevOps platform to avoid cloud migration hazards\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nima Badiey\"}],\n        \"datePublished\": \"2022-01-25\",\n      
}",{"title":1458,"description":1459,"authors":1464,"heroImage":1460,"date":1465,"body":1466,"category":852,"tags":1467},[891],"2022-01-25","\nThese unprecedented times have been an unexpected catalyst driving companies to finally get serious about moving to the cloud. The adoption wave started in retail and banking by consumers who were unable to shop and bank in-person and were forced instead to drastically increase their online purchases.\n\nAs a result, many e-commerce sites hosted on public clouds experienced a Cambrian explosion of activity and business. The impact of the pandemic soon crossed every industry and segment from healthcare and education to hospitality and food services, as more and more companies closed their offices in favor of remote work. With closed buildings came closed data centers and other short-staffing of business-critical services.\n\nCoupled with supply chain disruptions of compute, networking, and storage gear, many IT teams were faced with mounting business continuity challenges, which impacted service level agreements, product quality, and ultimately customer satisfaction.\n\nThe answer to these challenges is to move applications, data, and infrastructure from on-premises to the cloud, with hosting provided by large public cloud providers like Amazon Web Services (AWS) and Google Cloud – both of which are better suited to support business-critical services. \n\nAs businesses continue to define their new processes and procedures, one condition is likely to become permanent: Cloud adoption is expected to accelerate and spread across all industries. 
[IDC FutureScape](https://www.businesswire.com/news/home/20191029005144/en/IDC-FutureScape-Outlines-the-Impact-Digital-Supremacy-Will-Have-on-Enterprise-Transformation-and-the-IT-Industry) predicts that by 2024 more than 50% of all IT spending will go toward digital transformation and cloud-first innovation projects.\n\nDespite this immutable momentum, many CIOs remain reticent as 80% are still concerned that cloud adoption initiatives alone won’t deliver the expected business agility they need, according to [a McKinsey report](https://www.mckinsey.com/business-functions/mckinsey-digital/our-insights/unlocking-business-acceleration-in-a-hybrid-cloud-world).\n\nOne reason for this is that migrating and modernizing applications simultaneously to the cloud takes more effort and experience than organizations can afford. To be successful, organizations need to adopt new software development strategies and DevOps tools to support hybrid and multi-cloud models. These teams often lack the consistent methodology and toolchains to plan, prioritize, automate, and track the progress of cloud migration projects. Adding to the risks, many companies are hampered with legacy software development workflows, disconnected processes, and siloed tools. They are further burdened with a complicated inventory of mismatched legacy hardware, aging networks, security, and application stacks that are poorly suited to cloud-native architectures.\n\nUltimately, successful cloud migrations require mastering the basics by adopting proven, repeatable, and reliable processes such as breaking big initiatives into manageable workstreams. Consistency and structured repeatability have a greater impact on project success than executive sponsorship, funding, or upgrading the company culture to an “agile” mindset. GitLab plays a critical role in the successful deployment and delivery of these cloud migration projects. 
\n\n## DevOps: The first logical step in cloud adoption\n\nGitLab is a modern DevOps platform used by startups as well as midsize and Fortune 500 companies to build and deliver software through an integrated toolset. In simple terms, it’s Git for source code management with a built-in CI/CD pipeline that includes security, code scanning, and monitoring. GitLab is an all-in-one integrated platform. No need to digitally piece multiple solutions together and no more switching between different tools and apps just to deploy software code. \n\nAs enterprises plan to migrate apps, services, data, and/or infrastructure to the cloud this year, these projects will benefit from new ways to plan, manage, and deliver value from their cloud investments.\n\nTo get started, GitLab, together with AWS and Google Cloud, has chronicled this journey with valuable guidance to help cloud teams embrace the cultural shift necessary for modern agile teams. In these guides, we map out an approach that empowers cross-functional teams to work together concurrently during migrations, refactorization, and adoption of new cloud services.\n\nWith GitLab, users can define custom assessment methodologies, create repeatable task lists for application migration, store app code and Terraform configuration scripts in Git, and set security protocols easily through simple merge requests. GitLab can also automate the process of testing, scanning, monitoring, and deploying business apps. By embracing next-gen DevOps, cloud migration projects can be more successful with proven, repeatable, and reliable processes all managed on the GitLab DevOps platform. 
\n\n### Learn more:\n- [Migration to Google Cloud and adopting cloud native](https://learn.gitlab.com/gitlab-google-cloud)\n- [Accelerate your migration to AWS using a DevOps model](https://learn.gitlab.com/gitlab-aws-microsite)\n\n",[854,789,9],{"slug":1469,"featured":6,"template":700},"utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","content:en-us:blog:utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","Utilize The Gitlab Devops Platform To Avoid Cloud Migration Hazards","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"_path":1475,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1476,"content":1482,"config":1494,"_id":1496,"_type":13,"title":1497,"_source":15,"_file":1498,"_stem":1499,"_extension":18},"/en-us/blog/a-benchmarking-framework-for-sast",{"title":1477,"description":1478,"ogTitle":1477,"ogDescription":1478,"noIndex":6,"ogImage":1479,"ogUrl":1480,"ogSiteName":686,"ogType":687,"canonicalUrls":1480,"schema":1481},"A Google Summer of Code project: creating a benchmarking framework for SAST","Our 2022 Google Summer of Code project helped to create a benchmarking framework for SAST.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677267/Blog/Hero%20Images/benchmarking.png","https://about.gitlab.com/blog/a-benchmarking-framework-for-sast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A Google Summer of Code project: creating a benchmarking framework for SAST\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Henriksen\"},{\"@type\":\"Person\",\"name\":\"Martynas Krupskis\"},{\"@type\":\"Person\",\"name\":\"Mark Art\"},{\"@type\":\"Person\",\"name\":\"Dinesh Bolkensteyn\"},{\"@type\":\"Person\",\"name\":\"Isaac Dawson\"},{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        
\"datePublished\": \"2022-09-27\",\n      }",{"title":1477,"description":1478,"authors":1483,"heroImage":1479,"date":1490,"body":1491,"category":1492,"tags":1493},[1484,1485,1486,1487,1488,1489],"Michael Henriksen","Martynas Krupskis","Mark Art","Dinesh Bolkensteyn","Isaac Dawson","Julian Thome","2022-09-27","In summer 2022, the [Vulnerability Research team at\nGitLab](/handbook/engineering/development/sec/secure/vulnerability-research/) \n\nlaunched the [Google Summer of Code\n(GSoC)](https://summerofcode.withgoogle.com/) project: \n\n[A benchmarking framework for\nSAST](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gsoc-2022/-/issues/1).\n\n\nThe goal of the project was to create a benchmarking framework, which would\nassess the\n\nimpact and quality of a security analyzer or configuration change before it\nreaches the production environment.\n\n\n## Preliminaries \n\n\n### GitLab SAST\n\n\nAs a complete DevOps Platform, GitLab has a variety of integrated [static\nanalysis (SAST) tools](/direction/secure/static-analysis/sast/) \n\nfor different languages and frameworks. These tools help developers find\n\nvulnerabilities as early as possible in the software development lifecycle.\n\nThese tools are constantly being updated, either by upgrading the underlying\n\nsecurity analyzers or by applying configuration changes.\n\n\nSince all the integrated SAST tools are very different in terms of\n\nimplementation, and depend on different tech stacks, they are all\n\nwrapped in Docker images. The wrappers translate tool-native vulnerability\n\nreports to a [generic, common report\nformat](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format)\nwhich is\n\nmade available by means of the `gl-sast-report.json` artifact. 
This generic\n\nreport is GitLab's common interface between analyzers and the GitLab Rails\n\nbackend.\n\n\nBenchmarking is important to assess the efficacy of analyzers and helps to\nmake\n\ndata-driven decisions. For example, benchmarking is useful for QA testing\n\n(spotting regressions), for data-driven decision making, and for research by\n\nassessing the progression of the GitLab security feature performance over\ntime.\n\n\n### Google Summer Of Code (GSoC)\n\n\n[Google Summer of Code (GSoC)](https://summerofcode.withgoogle.com/) \n\nis a 10-week program that enlists contributors to work on open source\nprojects\n\nin collaboration with open source organizations. For GSoC 2022, GitLab\noffered\n\nfour projects to GSoC contributors. The contributors completed each of the\n\nprojects with the guidance from GitLab team members who mentored them and\n\nprovided regular feedback and assistance when needed.\n\n\n### Terms & Notation\n\n\nIn this blog post, we use the terms/acronyms below to classify findings\nreported by security analyzers.\n\n\n| Acronym   | Meaning        |\nDescription                                                        |\n\n|-------|----------------|--------------------------------------------------------------------|\n\n| _TP_  | True Positive  | Analyzer correctly identifies a\nvulnerability.                     |\n\n| _FP_  | False Positive | Analyzer misidentifies a vulnerability or\nreported a vulnerability where none exist. |\n\n| _TN_  | True Negative  | Analyzer correctly ignores a potential false\npositive.             |\n\n| _FN_  | False Negative | Analyzer does not report a known\nvulnerability.                    
|\n\n\nFor the figures in the blog post we use the following notation: processes\nare\n\ndepicted as rounded boxes, whereas artifacts (e.g., files) are depicted as\n\nboxes; arrows denote an input/output (IO) relationship between the connected\nnodes.\n\n\n``` mermaid\n\nflowchart TB;\n\nsubgraph legend[ Legend ]\n   proc(Process);\n   art[Artifact];\n   proc -->|IO relation|art;\nend\n\n``` \n\n\n## Motivation\n\n\nThe authors of the paper [How to Build a\nBenchmark](https://dl.acm.org/doi/10.1145/2668930.2688819) distilled the\ndesirable characteristics of a benchmark below:\n\n> 1. Relevance: How closely the benchmark behavior correlates to behaviors\nthat are of interest to consumers of the results.\n\n> 2. Reproducibility: The ability to consistently produce similar results\nwhen the benchmark is run with the same test configuration.\n\n> 3. Fairness: Allowing different test configurations to compete on their\nmerits without artificial limitations.\n\n> 4. Verifiability: Providing confidence that a benchmark result is\naccurate.\n\n> 5. Usability: Avoiding roadblocks for users to run the benchmark in their\ntest environments.\n\n\nThere currently is no standard nor de facto language-agnostic SAST benchmark\n\nsatisfying all the criteria mentioned above. Many benchmark suites focus on\n\nspecific languages, are shipped with incomplete or missing ground-truths, or\n\nare based on outdated technologies and/or frameworks. A ground-truth or\n\nbaseline is the set of findings a SAST tool is expected to detect.\n\n\nThe main objective of the GSoC project was to close this gap and start to\n\ncreate a benchmarking framework that addresses all the desirable\ncharacteristics\n\nmentioned above in the following manner:\n\n\n1. Relevance: Include realistic applications (in terms of size, framework\nusage\n   and customer demand).\n2. Reproducibility: Automate the whole benchmarking process in CI.\n\n3. 
Fairness: Make it easy to integrate new SAST tools by just tweaking the\nCI\n   configuration and use the [GitLab security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format) as a common standard.\n4. Verifiability: Assemble baseline that includes all the relevant\n   vulnerabilities and make it publicly available. The baseline is the north star\n   that defines what vulnerabilities are actually included in a test application. \n5. Usability: Benchmark users can just integrate the benchmark as a\ndownstream\n   pipeline to their CI configuration.\n\n## A benchmarking framework for SAST\n\n\nThe benchmarking framework compares the efficacy of an analyzer against a\nknown\n\nbaseline. This is very useful for monitoring the efficacy of the analyzer\nthat\n\nparticipates in the benchmarking. The baseline is the gold standard that\nserves\n\nas a compass to guide analyzer improvements.\n\n\n### Usage\n\n\nFor using the framework, the following requirements have to be met:\n\n1. The analyzer has to be dockerized.\n\n1. The analyzer has to produce a vulnerability report that adheres to the\n   [GitLab security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format)\n   format, which serves as our generic intermediate representation to compare\n   analyzer efficacy. \n1. The baseline expectations have to be provided as \n   [GitLab security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format)\n   so that we can compare the analyzer output against it. 
\n\nThe framework is designed in such a way that it can be easily integrated\ninto\n\nthe CI configuration of existing GitLab projects by means of a [downstream\npipeline](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\n\nThere are many possible ways in which a downstream pipeline can be\ntriggered:\n\nsource code changes applied to an analyzer, configuration changes\n\napplied to an analyzer, or scheduled pipeline invocation. By using the\npipeline,\n\nwe can run the benchmarking frameworks continuously and instantaneously on\nthe GitLab\n\nprojects that host the source code of the integrated analyzers whenever code\nor\n\nconfiguration changes are applied. \n\n\n### Architecture \n\n\nThe figure below depicts the benchmarking framework when comparing an\nanalyzer\n\nagainst a baseline.\n\n\nWe assume that we have a baseline configuration available; a baseline\nconsists\n\nof an application that is an actual test application that includes\n\nvulnerabilities. These vulnerabilities are documented in an expectation file\n\nthat adheres to the [security report\nschema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format).\n\n\nNote that we use the terms baseline and expectation interchangeably. As\n\nmentioned earlier, the benchmarking framework is essentially a GitLab\npipeline\n\nthat can be triggered downstream. The configured analyzer then takes the\n\nbaseline app as input and generates a `gl-sast-report.json` file. The heart\nof the\n\nbenchmarking framework is the `compare` step, which compares the baseline\n\nagainst the report generated by the analyzer, both of which adhere to the\n\n[security report\nschema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format).\n\n\nThe compare step also computes the _TP_, _FN_ and _FP_ that have been\nreported by the\n\nanalyzer and computes different metrics based on this information. 
The\ncompare\n\nstep is implemented in the\n\n[evaluator\ntool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator). \n\n\n``` mermaid\n\nflowchart LR;\n\nsbx[gl-sast-report.json];\n\nbreport[Report];\n\nconfig[Configuration];\n\n\nconfig --> bf;\n\n\nsubgraph Baseline\n  bcollection[app];\n  baseline[expectation];\nend\n\n\nsubgraph bf [ Benchmarking Framework ]\n   orig(Analyzer);\n   compare(Compare);\n   orig --> sbx;\n   sbx --> compare;\nend\n\n\nbaseline --> compare;\n\ncompare --> breport\n\nbcollection --> orig\n\n```\n\n\nUsing the security report format as a common standard makes the benchmarking\n\nframework very versatile: the baseline could be provided by an automated\n\nprocess, by another analyzer, or manually, which happened to be the case in\nthis\n\nGSoC project.\n\n\n### Scoring\n\n\nThe main functionality of the [evaluator\ntool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\n\nis to compute the overlap/intersection, and difference between a baseline\nand\n\ngenerated report in order to uncover true positives, false positives, and\nfalse\n\nnegatives. \n\n\nThe relationship between _TP_, _FP_, _FN_, _TN_, baseline, and generated\nreport can be\n\nseen in the table below; it includes three columns `analyzer`, `baseline`\nand\n\n`classification`. The column `analyzer` represents the findings included in\nthe\n\nreport generated by the analyzer; column `baseline` represents the findings\n\nincluded in the baseline; column `classification` denotes the\n\nverdict/classification that the [evaluator\ntool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\n\nattaches to the analyzer finding when performing the comparison. 
The `X` and\n\n`-` denote reported and non-reported findings, respectively.\n\n\n| analyzer | baseline | classification |\n\n| -------- | -------  | -------------  |\n\n| -        | -        | _TN_           |\n\n| -        | X        | _FN_           |\n\n| X        | -        | _FP_           |\n\n| X        | X        | _TP_           |\n\n\nThe `classification` column in the table above shows that a _TP_ is a\n\nvulnerability existing in both baseline and generated report; similarly, an\n\n_FP_ is a vulnerability detected by an analyzer without a corresponding\n\nbaseline entry, while an _FN_ is a vulnerability present in the baseline but\n\nnot detected by an analyzer. Note, that _TN_ is practically not relevant for\n\nour use-case since the analyzers we are looking at only report unsafe,\n\nvulnerable cases instead of safe, non-vulnerable cases. \n\n\nAt the moment, the `evaluator` tool computes the metrics below:\n\n- Precision: _P_ = _TP_ /( _TP_ + _FP_ )\n\n- Recall: _R_ = _TP_ / ( _TP_ + _FN_ )\n\n- F-Score: _F_ = 2 * ( _P_ * _R_ ) / ( _P_ + _R_ ) \n\n- Jaccard-Index: _J_ = _TP_ / ( _TP_ + _FP_ + _FN_ )\n\n\nA higher precision indicates that an analyzer is less noisy due to the\nlow(er)\n\nnumber of _FPs_. Hence, a high precision leads to a reduction of auditing\neffort\n\nof irrelevant findings. A high recall represents an analyzer's detection\n\ncapacity. F-Score is a combined measure so that precision and recall can be\n\ncondensed to a single number. 
The Jaccard-Index is a single value to capture\n\nthe similarity between analyzer and baseline.\n\n\nThe [evaluator\ntool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\n\nsupports the addition of custom metrics via a simple call-back mechanism;\nthis\n\nenables us to add support for more metrics in the future that help us to gain\n\nadditional or new insights with regards to the efficacy of our analyzers.\n\n\n### Framework Properties\n\n\nIn principle, the implemented benchmarking framework is language-agnostic:\nnew\n\nanalyzers and baselines can be plugged-in as long as they adhere to the\n\n[security report\nschema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format). \n\n\nEstablishing baselines is laborious since it requires (cross-)validation, \n\ntrying out attacks on the running baseline application and\n\ncode auditing.\n\n\nFor the GSoC project, we established baselines for the applications below\n\ncovering Java ([Spring](https://spring.io/)) and Python\n\n([Flask](https://flask.palletsprojects.com/)) as they are [ranking high in\nthe most used languages and\nframeworks](https://survey.stackoverflow.co/2022/#technology-most-popular-technologies). \n\nFor a benchmark application to have practical utility, it is important that\nthe\n\napplication itself is based on technology, including programming languages\nand\n\nframeworks, that are used in the industry.\n\n\nFor both of these applications, the baseline/expectations have been\ncollected,\n\nverified and are publicly available: \n\n-\n[WebGoat](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/WebGoat/-/tree/baselines). \n  WebGoat is a deliberately insecure Web application used to teach security vulnerabilities.\n  We chose this as baseline application because it is often used as a benchmark\n  app in the Java world and it is based on [Spring](https://spring.io/) which is\n  one of the most popular frameworks in the Java world. 
\n-\n[vuln-flask-web-app](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/vuln-flask-web-app/-/tree/report)\nLike WebGoat, this application is deliberately insecure.\n`vuln-flask-web-app` covers both Python and\n[Flask](https://flask.palletsprojects.com/en/2.2.x/), one of the most\npopular web frameworks in the Python world.\n\n\n## Conclusion\n\n\nThis GSoC project was a first step towards building a FOSS benchmarking\n\nframework that helps the community to test their own tools and to build up a\n\nrelevant suite of baselines covering various languages and frameworks. With\nthe\n\nhelp of the community, we will continue adding more baselines to the\n\nbenchmarking framework in the future to cover more languages and frameworks.\n\n\nIf you found the project interesting, you might want to check out the\nfollowing repositories:\n\n\n-\n[evaluator](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\n\n- [WebGoat\nbaseline](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/WebGoat/-/tree/baselines)\n\n- [Vulnerable Flask Web App\nbaseline](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/vuln-flask-web-app/-/tree/report)\n\n- [Example of downstream pipeline triggering\nevaluator](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator-downstream)\n\n\nCover image by [Maxim Hopman](https://unsplash.com/@nampoh) on\n[Unsplash](https://unsplash.com/photos/fiXLQXAhCfk)\n\n{: .note}\n","open-source",[1292,9,1210],{"slug":1495,"featured":6,"template":700},"a-benchmarking-framework-for-sast","content:en-us:blog:a-benchmarking-framework-for-sast.yml","A Benchmarking Framework For Sast","en-us/blog/a-benchmarking-framework-for-sast.yml","en-us/blog/a-benchmarking-framework-for-sast",5,[678,705,734,756,775,796,818,839,862],1758662328321]