[{"data":1,"prerenderedAt":1573},["ShallowReactive",2],{"/en-us/blog/tags/demo/":3,"navigation-en-us":19,"banner-en-us":449,"footer-en-us":466,"demo-tag-page-en-us":676},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":10,"_id":12,"_type":13,"title":14,"_source":15,"_file":16,"_stem":17,"_extension":18},"/en-us/blog/tags/demo","tags",false,"",{"tag":9,"tagSlug":9},"demo",{"template":11},"BlogTag","content:en-us:blog:tags:demo.yml","yaml","Demo","content","en-us/blog/tags/demo.yml","en-us/blog/tags/demo","yml",{"_path":20,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":22,"_id":445,"_type":13,"title":446,"_source":15,"_file":447,"_stem":448,"_extension":18},"/shared/en-us/main-navigation","en-us",{"logo":23,"freeTrial":28,"sales":33,"login":38,"items":43,"search":376,"minimal":407,"duo":426,"pricingDeployment":435},{"config":24},{"href":25,"dataGaName":26,"dataGaLocation":27},"/","gitlab logo","header",{"text":29,"config":30},"Get free trial",{"href":31,"dataGaName":32,"dataGaLocation":27},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":34,"config":35},"Talk to sales",{"href":36,"dataGaName":37,"dataGaLocation":27},"/sales/","sales",{"text":39,"config":40},"Sign in",{"href":41,"dataGaName":42,"dataGaLocation":27},"https://gitlab.com/users/sign_in/","sign in",[44,88,186,191,297,357],{"text":45,"config":46,"cards":48,"footer":71},"Platform",{"dataNavLevelOne":47},"platform",[49,55,63],{"title":45,"description":50,"link":51},"The most comprehensive AI-powered DevSecOps Platform",{"text":52,"config":53},"Explore our Platform",{"href":54,"dataGaName":47,"dataGaLocation":27},"/platform/",{"title":56,"description":57,"link":58},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":59,"config":60},"Meet GitLab Duo",{"href":61,"dataGaName":62,"dataGaLocation":27},"/gitlab-duo/","gitlab duo ai",{"title":64,"description":65,"link":66},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":67,"config":68},"Learn more",{"href":69,"dataGaName":70,"dataGaLocation":27},"/why-gitlab/","why gitlab",{"title":72,"items":73},"Get started with",[74,79,84],{"text":75,"config":76},"Platform Engineering",{"href":77,"dataGaName":78,"dataGaLocation":27},"/solutions/platform-engineering/","platform engineering",{"text":80,"config":81},"Developer Experience",{"href":82,"dataGaName":83,"dataGaLocation":27},"/developer-experience/","Developer experience",{"text":85,"config":86},"MLOps",{"href":87,"dataGaName":85,"dataGaLocation":27},"/topics/devops/the-role-of-ai-in-devops/",{"text":89,"left":90,"config":91,"link":93,"lists":97,"footer":168},"Product",true,{"dataNavLevelOne":92},"solutions",{"text":94,"config":95},"View all Solutions",{"href":96,"dataGaName":92,"dataGaLocation":27},"/solutions/",[98,123,147],{"title":99,"description":100,"link":101,"items":106},"Automation","CI/CD and automation to accelerate deployment",{"config":102},{"icon":103,"href":104,"dataGaName":105,"dataGaLocation":27},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[107,111,115,119],{"text":108,"config":109},"CI/CD",{"href":110,"dataGaLocation":27,"dataGaName":108},"/solutions/continuous-integration/",{"text":112,"config":113},"AI-Assisted Development",{"href":61,"dataGaLocation":27,"dataGaName":114},"AI assisted development",{"text":116,"config":117},"Source Code 
Management",{"href":118,"dataGaLocation":27,"dataGaName":116},"/solutions/source-code-management/",{"text":120,"config":121},"Automated Software Delivery",{"href":104,"dataGaLocation":27,"dataGaName":122},"Automated software delivery",{"title":124,"description":125,"link":126,"items":131},"Security","Deliver code faster without compromising security",{"config":127},{"href":128,"dataGaName":129,"dataGaLocation":27,"icon":130},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[132,137,142],{"text":133,"config":134},"Application Security Testing",{"href":135,"dataGaName":136,"dataGaLocation":27},"/solutions/application-security-testing/","Application security testing",{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":27,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Software Compliance",{"href":145,"dataGaName":146,"dataGaLocation":27},"/solutions/software-compliance/","software compliance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":27},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":27,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":27,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":27,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":27,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":27,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":27,"dataGaName":185},"/solutions/public-sector/","public sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":27,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all resources",{"href":198,"dataGaName":194,"dataGaLocation":27},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":27},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":27},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":27,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":27},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":27},"/getting-started-videos/","best practice videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":27},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success stories",{"href":239,"dataGaName":240,"dataGaLocation":27},"/customers/","customer success 
stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":27},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":27},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":27},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab Services",{"href":262,"dataGaName":263,"dataGaLocation":27},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":27},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":27},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":27},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":27},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":27},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":27},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":27},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":27},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":27},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":27},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":27},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":27},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust Center",{"href":340,"dataGaName":341,"dataGaLocation":27},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":27},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":27},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":27},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":34,"config":364},{"href":36,"dataGaName":365,"dataGaLocation":27},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":27},"/support/","get help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":27},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login 
to",{"text":381,"config":382},"gitlab.com",{"href":41,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":56,"config":389},{"href":61,"dataGaName":56,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":108,"config":395},{"href":110,"dataGaName":108,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why GitLab?",{"href":69,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":32,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":61,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},{"freeTrial":436,"mobileIcon":441,"desktopIcon":443},{"text":437,"config":438},"Back to pricing",{"href":189,"dataGaName":439,"dataGaLocation":412,"icon":440},"back to pricing","GoBack",{"altText":414,"config":442},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":444},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":450,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"title":451,"button":452,"image":457,"config":461,"_id":463,"_type":13,"_source":15,"_file":464,"_stem":465,"_extension":18},"/shared/en-us/banner","is now in public beta!",{"text":453,"config":454},"Try the Beta",{"href":455,"dataGaName":456,"dataGaLocation":27},"/gitlab-duo/agent-platform/","duo banner",{"altText":458,"config":459},"GitLab Duo Agent Platform",{"src":460},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":462},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":467,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":468,"_id":672,"_type":13,"title":673,"_source":15,"_file":674,"_stem":675,"_extension":18},"/shared/en-us/main-footer",{"text":469,"source":470,"edit":476,"contribute":481,"config":486,"items":491,"minimal":664},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":471,"config":472},"View page 
source",{"href":473,"dataGaName":474,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":477,"config":478},"Edit this page",{"href":479,"dataGaName":480,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":482,"config":483},"Please contribute",{"href":484,"dataGaName":485,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":487,"facebook":488,"youtube":489,"linkedin":490},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[492,515,571,600,634],{"title":45,"links":493,"subMenu":498},[494],{"text":495,"config":496},"DevSecOps platform",{"href":54,"dataGaName":497,"dataGaLocation":475},"devsecops platform",[499],{"title":187,"links":500},[501,505,510],{"text":502,"config":503},"View plans",{"href":189,"dataGaName":504,"dataGaLocation":475},"view plans",{"text":506,"config":507},"Why Premium?",{"href":508,"dataGaName":509,"dataGaLocation":475},"/pricing/premium/","why premium",{"text":511,"config":512},"Why Ultimate?",{"href":513,"dataGaName":514,"dataGaLocation":475},"/pricing/ultimate/","why ultimate",{"title":516,"links":517},"Solutions",[518,523,525,527,532,537,541,544,548,553,555,558,561,566],{"text":519,"config":520},"Digital transformation",{"href":521,"dataGaName":522,"dataGaLocation":475},"/topics/digital-transformation/","digital transformation",{"text":133,"config":524},{"href":135,"dataGaName":133,"dataGaLocation":475},{"text":122,"config":526},{"href":104,"dataGaName":105,"dataGaLocation":475},{"text":528,"config":529},"Agile development",{"href":530,"dataGaName":531,"dataGaLocation":475},"/solutions/agile-delivery/","agile delivery",{"text":533,"config":534},"Cloud transformation",{"href":535,"dataGaName":536,"dataGaLocation":475},"/topics/cloud-native/","cloud transformation",{"text":538,"config":539},"SCM",{"href":118,"dataGaName":540,"dataGaLocation":475},"source code management",{"text":108,"config":542},{"href":110,"dataGaName":543,"dataGaLocation":475},"continuous integration & delivery",{"text":545,"config":546},"Value stream management",{"href":162,"dataGaName":547,"dataGaLocation":475},"value stream management",{"text":549,"config":550},"GitOps",{"href":551,"dataGaName":552,"dataGaLocation":475},"/solutions/gitops/","gitops",{"text":172,"config":554},{"href":174,"dataGaName":175,"dataGaLocation":475},{"text":556,"config":557},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":475},{"text":559,"config":560},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":475},{"text":562,"config":563},"Education",{"href":564,"dataGaName":565,"dataGaLocation":475},"/solutions/education/","education",{"text":567,"config":568},"Financial services",{"href":569,"dataGaName":570,"dataGaLocation":475},"/solutions/finance/","financial 
services",{"title":192,"links":572},[573,575,577,579,582,584,586,588,590,592,594,596,598],{"text":204,"config":574},{"href":206,"dataGaName":207,"dataGaLocation":475},{"text":209,"config":576},{"href":211,"dataGaName":212,"dataGaLocation":475},{"text":214,"config":578},{"href":216,"dataGaName":217,"dataGaLocation":475},{"text":219,"config":580},{"href":221,"dataGaName":581,"dataGaLocation":475},"docs",{"text":242,"config":583},{"href":244,"dataGaName":245,"dataGaLocation":475},{"text":237,"config":585},{"href":239,"dataGaName":240,"dataGaLocation":475},{"text":247,"config":587},{"href":249,"dataGaName":250,"dataGaLocation":475},{"text":260,"config":589},{"href":262,"dataGaName":263,"dataGaLocation":475},{"text":252,"config":591},{"href":254,"dataGaName":255,"dataGaLocation":475},{"text":265,"config":593},{"href":267,"dataGaName":268,"dataGaLocation":475},{"text":270,"config":595},{"href":272,"dataGaName":273,"dataGaLocation":475},{"text":275,"config":597},{"href":277,"dataGaName":278,"dataGaLocation":475},{"text":280,"config":599},{"href":282,"dataGaName":283,"dataGaLocation":475},{"title":298,"links":601},[602,604,606,608,610,612,614,618,623,625,627,629],{"text":305,"config":603},{"href":307,"dataGaName":300,"dataGaLocation":475},{"text":310,"config":605},{"href":312,"dataGaName":313,"dataGaLocation":475},{"text":318,"config":607},{"href":320,"dataGaName":321,"dataGaLocation":475},{"text":323,"config":609},{"href":325,"dataGaName":326,"dataGaLocation":475},{"text":328,"config":611},{"href":330,"dataGaName":331,"dataGaLocation":475},{"text":333,"config":613},{"href":335,"dataGaName":336,"dataGaLocation":475},{"text":615,"config":616},"Sustainability",{"href":617,"dataGaName":615,"dataGaLocation":475},"/sustainability/",{"text":619,"config":620},"Diversity, inclusion and belonging (DIB)",{"href":621,"dataGaName":622,"dataGaLocation":475},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":338,"config":624},{"href":340,"dataGaName":341,"dataGaLocation":475},{"text":348,"config":626},{"href":350,"dataGaName":351,"dataGaLocation":475},{"text":353,"config":628},{"href":355,"dataGaName":356,"dataGaLocation":475},{"text":630,"config":631},"Modern Slavery Transparency Statement",{"href":632,"dataGaName":633,"dataGaLocation":475},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":635,"links":636},"Contact Us",[637,640,642,644,649,654,659],{"text":638,"config":639},"Contact an expert",{"href":36,"dataGaName":37,"dataGaLocation":475},{"text":367,"config":641},{"href":369,"dataGaName":370,"dataGaLocation":475},{"text":372,"config":643},{"href":374,"dataGaName":375,"dataGaLocation":475},{"text":645,"config":646},"Status",{"href":647,"dataGaName":648,"dataGaLocation":475},"https://status.gitlab.com/","status",{"text":650,"config":651},"Terms of use",{"href":652,"dataGaName":653,"dataGaLocation":475},"/terms/","terms of use",{"text":655,"config":656},"Privacy statement",{"href":657,"dataGaName":658,"dataGaLocation":475},"/privacy/","privacy statement",{"text":660,"config":661},"Cookie preferences",{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"cookie 
preferences","ot-sdk-btn",{"items":665},[666,668,670],{"text":650,"config":667},{"href":652,"dataGaName":653,"dataGaLocation":475},{"text":655,"config":669},{"href":657,"dataGaName":658,"dataGaLocation":475},{"text":660,"config":671},{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":677,"featuredPost":1551,"totalPagesCount":1571,"initialPosts":1572},[678,704,728,750,770,789,812,832,854,877,900,919,939,961,982,1002,1020,1039,1058,1078,1098,1118,1139,1159,1179,1199,1220,1240,1259,1280,1301,1322,1342,1363,1384,1404,1424,1445,1465,1487,1509,1529],{"_path":679,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":680,"content":688,"config":697,"_id":700,"_type":13,"title":701,"_source":15,"_file":702,"_stem":703,"_extension":18},"/en-us/blog/applying-risk-management-to-remote-learning",{"title":681,"description":682,"ogTitle":681,"ogDescription":682,"noIndex":6,"ogImage":683,"ogUrl":684,"ogSiteName":685,"ogType":686,"canonicalUrls":684,"schema":687},"Applying risk management to pandemic-driven remote learning","A GitLab team member and parent offers some tips to improve today’s remote learning experience.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672774/Blog/Hero%20Images/pexels-august.jpg","https://about.gitlab.com/blog/applying-risk-management-to-remote-learning","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Applying risk management to pandemic-driven remote learning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Meghan Maneval\"}],\n        \"datePublished\": \"2020-08-27\",\n      }",{"title":681,"description":682,"authors":689,"heroImage":683,"date":691,"body":692,"category":693,"tags":694},[690],"Meghan Maneval","2020-08-27","Like many of you, when COVID-19 began to spread in the Spring of 2020, I never imagined just how much my life would change. While I personally was accustomed to working remotely, my husband and children certainly were not. As the pandemic continues, parents around the world are faced with a new challenge: how to simultaneously manage their careers and their children’s educational needs. The risks, at times, can feel insurmountable. I went through every emotion this summer as I tried to strategize for what pandemic-driven remote learning would look like for my family. And then I realized, why am I trying to recreate the wheel? As an all-remote company, [GitLab’s values](https://handbook.gitlab.com/handbook/values/) and [all-remote culture](/company/culture/all-remote/) provide a proven model for successfully managing a remote workforce. So why not try it out with my kids?\n\nSo with that knowledge and appreciation, I decided to utilize the basic principles of [risk management](https://handbook.gitlab.com/handbook/security/security-assurance/security-risk/storm-program/index.html) to manage my family’s work and learn from home routine. But don’t worry, you don’t have to be a compliance professional to utilize this technique. In this blog post, I've mapped out the steps I used with my family that I hope will contribute to a more successful 2020/2021 school year for families.\n\nBefore you start, it is critical to remember, you can never fully eliminate risk. The steps below are designed to reduce the risk to align with your risk appetite. 
Only you can determine what level of risk you will accept. Some people, like myself, may be more risk-averse and therefore seek to plan out everything to the smallest detail. Others might be more risk-tolerant and willing to let things “slide” a little. No matter where you fall on that spectrum, you can utilize the steps below to document and execute a successful pandemic-driven remote learning plan.\n\n### 1: Identify\n\nThe first stage of risk management is to identify possible risks. If you don’t know what could go wrong you can’t prepare for it! It’s important to [collaborate](https://handbook.gitlab.com/handbook/values/#collaboration) with each member of your family and understand their specific needs and concerns. As parents, we all know that each of our children has different needs. The same is true for their education: what works for one student won’t work for all students.\n\nLet’s consider last spring as our “trial run”. For remote learning, discuss with your children what they enjoyed about that time and what didn’t work. If possible, reach out to their previous year’s teachers for additional feedback. To ensure your remote work success during present times, it is also important to have a discussion with your boss and/or Human Resources department to set and understand expectations. Many employers have programs, like GitLab’s [Family and Friends Day](https://about.gitlab.com/company/family-and-friends-day/) to provide flexible schedules or supportive programs like what’s described in this [GitLab COVID-19 handbook page](/handbook/total-rewards/benefits/covid-19/#sts=Resources%20for%20COVID-19). The more people you talk to, the more data you can collect. And the more data you have now, the more prepared you will be for the next steps.\n\n### 2: Analyze\nOnce you have identified your risks, you can move on to analyzing them. Depending on how many people are in your family, the list of risks identified may be long. In my case, as a family of 7, we had around 15 items on our initial list when we undertook this exercise. As we began analyzing them, however, our list grew to almost 30.\n\nFor us, the easiest way to analyze these risks was to consider the impact these risks had on the family (or the individual) and the likelihood of them recurring. Then we asked why over and over until the true cause is identified.\n\nExample:\n**Student A (17):** The school provided the students with weekly packets where they read and complete worksheets. Student A was unable to complete many of the assignments and failed 2 classes.\n**Risk Identified:** Student A is concerned the school will do a similar process (paper packets) and he will continue to fail.\n**Impact:** If Student A fails another class, he won’t graduate on time.\n**Likelihood:** Depending on the classes and the course work, this could be highly likely.\n\nRoot cause analysis: Why did Student A fail?\n* Student A did not complete the packets for 2 of his classes, why?\n* Student A had trouble understanding the content, why?\n* Student A learns better with verbal instructions and opportunities to ask questions.\n\nIn this case, the root cause was that Student A needs more verbal instruction and oversight when being presented with new concepts.\n\nYou may also identify opportunities as part of this process. For example, in our house, Student C preferred using Google Classroom’s To-Do List functionality to track open assignments and was able to easily visualize his tasks. 
By identifying what went right, in addition to what went wrong, you are able to better shape your treatment plans in the next phase.\n\n### 3: Action\nOnce you have analyzed your risks and identified the root causes, you can move on to the action phase. This phase is often the most difficult to complete. If you knew how to do it the right way, you would have done it correctly in the first place, right? Actually, wrong. We learn a lot from failing! Some of the best plans go through multiple [iterations](https://handbook.gitlab.com/handbook/values/#iteration) before you find the right fit. The important thing is to focus on improvement.\n\nBelow is a snapshot of the action plans I developed with my family:\n\n| **Risk** | **Root cause** | **Treatment plan** |\n|:-------------|:-------------|:-------------|\n| Student A is concerned the school will employ a similar process (paper packets) and he will continue to fail. | Student A learns better with verbal instructions and opportunities to ask questions. | _Iteration 1_: Parent assists Student A in creating a schedule where Parent can review the instruction page with Student A and answer any questions up front. Student A then works on packets for 1 hour. If packet is not completed and/or student has questions, Student A asks Parent for assistance during Parent’s lunch break. \u003Cbr/> _Iteration 2_: If school changes format to online learning using Zoom, Student A will work with teacher on expectations and additional assistance. |\n| Parent is concerned about Student B’s social and emotional well-being. | Student B learns better when she can work in a group with her peers to solve problems. Student B is used to having a classroom of friends to support her. | _Iteration 1_: Parent sets up an iPad for Student B to contact her friends. \u003Cbr/> _Iteration 2_: Teacher sets up breakout rooms in Zoom for collaboration. |\n| Parent is concerned about internet bandwidth. | Up to 7 people are using the wireless to learn and work from home. | _Iteration 1_: Parent increases internet speeds and bandwidth. Parent moves router to offer wired connection to Parent’s laptop. \u003Cbr/> _Iteration 2_: Parent sets up router to support two bands- 2.4ghz and 5ghz. ** \u003Cbr/> _Iteration 3_: Parent replaces older devices that might be bandwidth hogs. \u003Cbr/> _Iteration 4_: Parent coordinates a “no meeting” block during peak school hours with employer. |\n\n** The 2.4ghz network is slower but can reach further. However, 2.4 is very prone to interference (such as microwaves). The 5ghz network is faster, but the signal is weaker.\n\nThe final step in the action phase is to discuss the plan(s) with all parties involved. Being [transparent](https://handbook.gitlab.com/handbook/values/#transparency) with teachers and your employer will be key to your success. In our case, we spoke to each of our children’s teachers and expressed our concerns. In many cases, your child’s teachers can add a lot of value to the action plans. The same is true for your employer. When you surface issues constructively, it allows you to be proactive in your response plan.\n\n### 4: Monitor\nNow that you have your plans in place, you need to find a way to determine if they are working. In order to track your results, you need a measure of success. Remember when I said that each person’s risk appetite is different? The same is true with measures of success. 
In our case, we decided to measure our children’s success based on two factors: attendance in virtual classes and completion of assignments. For our high school and college-age children, we set a 90% attendance goal with a B average across all classes. For our elementary-age children, we set an 85% attendance goal; however, 95% of assignments must be turned in. Each child also set a “stretch” goal to address something particularly challenging from the Spring. For example, Student B struggles with reading and her progress was stunted due to lack of reading support during the spring semester. She set a personal goal to get back to the level she was at by the end of the first term.\n\nAs you can see, the principles of risk management can be pretty handy in the real world. As you work through these steps with your family, Keeping GitLab’s values [CREDIT](https://handbook.gitlab.com/handbook/values/#credit) in mind can help guide the way.\n\n* **C**ollaborate: No one can solve this alone.\n* **R**esults: Focus on action and growth, not perfection.\n* **E**fficiency: Allow your kids self-learning opportunities, but step in when needed.\n* **D**iversity, Inclusion and Belonging: Build a safe community where everyone has input. This includes your family, their teachers, and your employer(s).\n* **I**teration: We all will fail. At some point, something will go wrong. But that’s ok! Learn from it and reassess the plan. It’s ok to change the plan if it isn’t working.\n* **T**ransparency: Openly discuss how your family is feeling about remote education and work. But remember, as the parent or caregiver, your tone will set the tone for the rest of the family. So be sure to be constructive and positive in your conversations. And, as cheesy as it sounds, print it out and post it! We have schedules, reminders, and signs posted all around our house to transparently communicate the expectations and ensure we are all working together to meet our collective goals.\n\nDoes this plan resonate with you?  Have a suggestion I missed including? Please leave a comment, I’d love to iterate on my family’s approach!\n\nCover image by [August de Richelieu](https://www.pexels.com/@august-de-richelieu) on [Pexels](https://pexels.com/)","unfiltered",[695,696,9],"security","inside GitLab",{"slug":698,"featured":6,"template":699},"applying-risk-management-to-remote-learning","BlogPost","content:en-us:blog:applying-risk-management-to-remote-learning.yml","Applying Risk Management To Remote Learning","en-us/blog/applying-risk-management-to-remote-learning.yml","en-us/blog/applying-risk-management-to-remote-learning",{"_path":705,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":706,"content":712,"config":722,"_id":724,"_type":13,"title":725,"_source":15,"_file":726,"_stem":727,"_extension":18},"/en-us/blog/aws-reinvent-recap",{"title":707,"description":708,"ogTitle":707,"ogDescription":708,"noIndex":6,"ogImage":709,"ogUrl":710,"ogSiteName":685,"ogType":686,"canonicalUrls":710,"schema":711},"Highlights from AWS re:Invent 2018","Catch up on what GitLab got up to at AWS re:Invent last week! 
Reinventing pipelines, emerging as a single application, theCUBE interviews, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679994/Blog/Hero%20Images/aws_booth_2018.jpg","https://about.gitlab.com/blog/aws-reinvent-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Highlights from AWS re:Invent 2018\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2018-12-06\",\n      }",{"title":707,"description":708,"authors":713,"heroImage":709,"date":715,"body":716,"category":300,"tags":717},[714],"Priyanka Sharma","2018-12-06","\n\nLast week GitLab was at AWS re:Invent 2018, the marquee event for cloud computing in the US. As the frontrunner in the space, Amazon has built re:Invent to be a juggernaut. This year it commanded most of the Las Vegas strip and had over 50,000 attendees. As a first-time visitor myself, I was impressed by the sheer scale and efficiency of the event. I was also thrilled to achieve my personal goal of giving my first talk with a live demo using code and GitLab. As for GitLab, we saw that our company emerged as a leader in the DevOps space with a single application for the whole software development lifecycle.\n\n## Highlights\n\n### Reinventing CI/CD pipelines\n\nOur CEO [Sid Sijbrandij](/company/team/#sytses) and I did a talk and live demo about reinventing CI/CD pipelines using GitLab, [Kubernetes](/solutions/kubernetes/), and EKS. This was our first hint that this re:Invent was going to be special. The talk was bursting at the seams with attendees, as we shared both the challenges of the toolchain crisis engulfing our ecosystem, and about how a single application for the entire DevOps lifecycle can make an improvement of over 200 percent in cycle times. You can [check out the presentation here](https://docs.google.com/presentation/d/1x1g4pfpoaav9lhcYkjAJylLMl-9S0JFTeKXlNF98O-I/edit?usp=sharing).\n\n![Sid Sijbrandij and Priyanka Sharma on stage at AWS re:Invent](https://about.gitlab.com/images/blogimages/aws-2018/aws_2018_sid_talk_stage.jpeg){: .shadow.medium.center}\n\nThe demo, which showed us running a CI/CD pipeline and deploying code to Kubernetes on EKS, is an example of the [cloud native workflows](/topics/cloud-native/) users can push via GitLab. It is such competency that makes Kubernetes on EKS a breeze and is the reason GitLab was awarded the [AWS Partner DevOps Competency Certification](/blog/gitlab-achieves-aws-devops-competency-certification/) to confirm our viability and excellence as a DevOps solution for companies using AWS Cloud.\n\n### Validation for our vision\n\nOur experience at re:Invent was one of validation and emergence. As a company, we saw that our efforts to build the first single application for the entire DevOps lifecycle have paid off and our users resonated with our message. Most folks who came to our booth were aware that GitLab played a part in multiple stages (if not all) of their workflow and many were avid [GitLab CI](/solutions/continuous-integration/) fans. 
Gone are the days when [version control](https://docs.gitlab.com/ee/topics/gitlab_flow.html) was the only thing GitLab was associated with.\n\n![Collage from GitLab at AWS re:Invent](https://about.gitlab.com/images/blogimages/aws-2018/aws_booth_collage.jpeg){: .medium.center}\n\nOur VP of Alliances, [Brandon Jung](/company/team/#brandoncjung), [appeared on theCUBE](https://www.youtube.com/watch?v=Ejs5xGAhL8s) with a company called Beacon. As the former head of partnerships at Google Cloud, Brandon has a long history with GitLab. He has seen the company grow over the years and shared how our rocketship ascent across the DevOps lifecycle convinced him of the potential. He said, \"In just over two years, [GitLab became the frontrunner for continuous integration](/blog/gitlab-leader-continuous-integration-forrester-wave/), according to Forrester. That's impressive.\"\n\n### Livestream with The New Stack\n\nI also represented GitLab on [a livestream podcast](https://www.pscp.tv/w/1eaJbODAepnxX) with [The New Stack](https://thenewstack.io/), [Matt Biilmann](https://twitter.com/biilmann?lang=en), CEO of [Netlify](/blog/netlify-launches-gitlab-support/), and [Joe Beda](https://twitter.com/jbeda), founder of [Heptio](https://heptio.com/) and creator of Kubernetes. We discussed GitOps, NoOps, and the toolchain crisis. As Matt wisely said, \"Trust in open source is critical to cloud computing and the ecosystem. Companies like GitLab will keep the players honest.\"\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">GitOps, NoOps and the tool chain crisis. \u003Ca href=\"https://t.co/mtfm8OaYYD\">https://t.co/mtfm8OaYYD\u003C/a>\u003C/p>&mdash; The New Stack (@thenewstack) \u003Ca href=\"https://twitter.com/thenewstack/status/1067881587214184448?ref_src=twsrc%5Etfw\">November 28, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWe thank AWS for creating this amazing ecosystem of end users and practitioners who came together in Vegas last week. Next year will be bigger, better. Until then, see you all at [KubeCon](/events/)! 
😃\n",[718,268,9,278,719,720,696,721],"CI","news","kubernetes","open source",{"slug":723,"featured":6,"template":699},"aws-reinvent-recap","content:en-us:blog:aws-reinvent-recap.yml","Aws Reinvent Recap","en-us/blog/aws-reinvent-recap.yml","en-us/blog/aws-reinvent-recap",{"_path":729,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":730,"content":736,"config":744,"_id":746,"_type":13,"title":747,"_source":15,"_file":748,"_stem":749,"_extension":18},"/en-us/blog/cd-automated-integrated",{"title":731,"description":732,"ogTitle":731,"ogDescription":732,"noIndex":6,"ogImage":733,"ogUrl":734,"ogSiteName":685,"ogType":686,"canonicalUrls":734,"schema":735},"GitLab’s automated and integrated continuous delivery","Learn about how the power of GitLab Auto DevOps can help increase productivity and speed up releases.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681872/Blog/Hero%20Images/CD-2st-mkt-diff-cover-1275x849.jpg","https://about.gitlab.com/blog/cd-automated-integrated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s automated and integrated continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-01-22\",\n      }",{"title":731,"description":732,"authors":737,"heroImage":733,"date":739,"body":740,"category":693,"tags":741},[738],"Cesar Saavedra","2021-01-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nOrganizations adopting DevOps best practices to software delivery spend time and effort designing, building, testing, integrating, and maintaining CI/CD pipelines for their different projects. Just as they must spend some of their time maintaining their business applications instead of innovating, they must do the same for their pipelines. Freeing your developers so that they can spend more of their time creating new business applications and differentiating value to the business is of utmost importance to remain competitive in a world where organizations must be digital leaders to succeed in the marketplace.\n\nGitLab provides [Auto DevOps](/topics/devops/), which are prescribed out-of-the-box CI/CD templates that auto-discover the source code you have. Based on best practices, they automatically detect, build, test, deploy, and monitor your applications. Auto DevOps save your developers from implementing their own pipelines so that they can spend more time innovating. In the following paragraphs, we go over how the power of Auto DevOps automates and integrates your continuous delivery to help increase productivity and speed up releases.\n\n## Enabling Auto DevOps\n\nIt’s very easy to enable Auto DevOps for your application. All you need to do is go to your Project Settings and select the configuration you desire for Auto DevOps. As the picture below depicts, you can select the deployment strategy to “Automatic deployment to staging, manual deployment to production”:\n\n![autodevops-on](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-on.png){: .shadow.medium.center.wrap-text}\n\nThe Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process.\n\nThe pipeline then deploys the application to staging for verification and then to production in an incremental fashion. 
Auto DevOps saves you and your developers from implementing your own pipelines so that you can spend more time innovating.\n\n## Auto DevOps stages and jobs\n\nThe stages and jobs of the Auto DevOps pipeline vary according to the way you configured it. You can also customize the prescribed Auto DevOps pipeline or reuse only portions of it. Let’s review the prescribed stages and jobs for a simple Java application.\n\n1) First you find the Build stage. Auto Build creates a build of the application using an existing Dockerfile or buildpacks. The resulting Docker image is pushed to the built-in Container Registry. \n\n![auto-build](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-build.png){: .shadow.medium.center.wrap-text}\n\nAll these steps are automatically executed on your application so that you can spend more time delivering value to the business.\n\n2) Next is a variety of tests under the Test stage. Auto DevOps includes jobs for static analysis and code checks, For identifying security issues in containers, For analyzing project dependencies and security issues, For scanning license dependencies, For detecting credentials and secrets exposure, For running security analysis of Java code, And for specific unit tests for the language and framework.\n\n![auto-test](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-test.png){: .shadow.medium.center.wrap-text}\n\nAll these tests increase the quality of code, compliance and reliability that translate into a highly resilient production environment.\n\n3) The review stage contains a single job that spins up an ephemeral environment to be used by the Dynamic Application Security Testing or DAST. Likewise the Dast stage has the job, Auto Dynamic Application Security Testing, which analyzes the current code and checks for potential security issues by running (Open Web Application Security Project) OWASP-related tests.\n\n![auto-review-dast](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-review-dast.png){: .shadow.medium.center.wrap-text}\n\n4) The prescribed stages and jobs in Auto DevOps vary depending on how you configure it. In this example, the user has selected “Automatic deployment to staging, manual deployment to production” when enabling Auto DevOps, so towards the CD portion of the pipeline, we see the staging stage, which contains a single job. The staging job deploys the user’s application to the staging environment. It will also instantiate the staging environment, if needed.\n\n![auto-staging](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-staging.png){: .shadow.medium.center.wrap-text}\n\n5) The production stage is manual and contains four jobs to incrementally deploy his application to production. An incremental rollout decreases the risk of a production outage or downtime. By releasing production changes gradually, error rates or performance degradation can be monitored, and if there are no problems, all of production can be updated.\n\n![auto-prod](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-prod.png){: .shadow.medium.center.wrap-text}\n\n6) The user has been prescribed a performance stage with a single job with the same name. 
Auto Browser Performance Testing measures the browser performance of each web page and reports on any degradation or improvement so that appropriate action can be taken.\n\n![auto-browser-perf](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-browser-perf.png){: .shadow.medium.center.wrap-text}\n\n7) The last stage is the cleanup stage, which contains a job that brings down and frees all resources of the ephemeral DAST environment that was brought up earlier in the CI portion of the pipeline.\n\n![auto-cleanup](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-cleanup.png){: .shadow.medium.center.wrap-text}\n\nThis entire prescribed CI/CD pipeline, with all its stages and jobs, is based on best practices and is automatically run for the user’s project saving them time and effort from developing their own pipeline.\n\n## Auto Review Apps\n\nAs developers collaborate on a project, Auto DevOps automatically includes Auto Review Apps, which stands up an ephemeral environment for stakeholders to review the running application with proposed changes before they are merged to the main branch. The teardown and freeing of the resources of the ephemeral review environment are also automatically done by Auto DevOps once the merge takes place.\n\n![auto-review-apps](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-review-apps.png){: .shadow.medium.center.wrap-text}\n\n## Modifying Auto DevOps\n\nHere are some ways that you can modify Auto DevOps.\n\n1) **Customization via environment variables**. If you would like to skip some of the stages and jobs in Auto DevOps, you can do this via project variables. For example, say you are using all open source licensed software within your project and you are pretty confident about your web application performance, and you’d also like to add the ability to do canary deployments. You can customize Auto DevOps via environment variables to skip the license-scanning and performance jobs and add canary deployments to your project by creating and setting specific environment variables as shown below.\n\n![auto-env-vars](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-env-vars.png){: .shadow.medium.center.wrap-text}\n\nYou could also use the GitLab APIs to script these modifications if he so desired.\n\n2) **Customization by editing the DevOps pipeline**. Another way to customize the Auto DevOps pipeline is by adding it to your own project and then making changes to it.\nBelow you can see a screen snapshot of an Auto DevOps pipeline edit where LICENSE_MANAGEMENT and web PERFORMANCE tests are being disabled.\n\n![autodevops-pipeline-edit](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-pipeline-edit.png){: .shadow.medium.center.wrap-text}\n\n3) **Customization by using only portions of Auto DevOps**. You could also leverage portions of Auto DevOps in your own pipeline by including specific templates. In the smaller pipeline below, only the Auto Build and Auto Test capabilities of Auto DevOps are being reused.\n\n![autodevops-portions](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-portions.png){: .shadow.medium.center.wrap-text}\n\nThe power of Auto DevOps automates and integrates your continuous delivery to help speed up your releases by saving you time from having to write your own pipelines. 
By using Auto DevOps you can accelerate your product delivery times and bring differentiating application features faster to market.\n\nIf you’d like to see the power of GitLab Auto DevOps in action, watch this [video](https://youtu.be/blJT8f6ZDH8).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\nPhoto by [Tim Carey](https://unsplash.com/@baudy?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/formula-1?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[742,743,9],"CD","DevOps",{"slug":745,"featured":6,"template":699},"cd-automated-integrated","content:en-us:blog:cd-automated-integrated.yml","Cd Automated Integrated","en-us/blog/cd-automated-integrated.yml","en-us/blog/cd-automated-integrated",{"_path":751,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":752,"content":758,"config":764,"_id":766,"_type":13,"title":767,"_source":15,"_file":768,"_stem":769,"_extension":18},"/en-us/blog/cd-solution-overview",{"title":753,"description":754,"ogTitle":753,"ogDescription":754,"noIndex":6,"ogImage":755,"ogUrl":756,"ogSiteName":685,"ogType":686,"canonicalUrls":756,"schema":757},"How to use GitLab tools for continuous delivery","Learn how to use GitLab technology to release software faster and with less risk.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682956/Blog/Hero%20Images/CD-continuous-nature-cover-880x586.jpg","https://about.gitlab.com/blog/cd-solution-overview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab tools for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-12-17\",\n      }",{"title":753,"description":754,"authors":759,"heroImage":755,"date":760,"body":761,"category":762,"tags":763},[738],"2020-12-17","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-04-01.\n\nEach organization is unique in how they adopt continuous delivery (CD) principles, but the journey to modernize and enhance your software release process can be conducted in phases. In this blog post, we unpack some of the tools companies can use to adopt continuous delivery (CD), and explain how companies can reach continuous delivery in three key stages. The good news is, regardless of how you get there, GitLab offers a solution that allows companies to modernize their release process at their own pace and in their own way.\n\n## Consolidate disparate tools into a single platform\n\nThe first step to reaching [continuous delivery](/topics/continuous-delivery/) is to consolidate the number of disparate tools in your pipeline by using the tools and capabilities baked into the GitLab product. In this section, we summarize some of the fundamental components of GitLab and give examples of how they work.\n\nGitLab users can track issues and merge requests using [milestones](https://docs.gitlab.com/ee/user/project/milestones/#milestones), which also help with setting time-bound goals. 
Milestones can be used as Agile sprints and releases, and allow you to organize issues and merge requests into a one group, with an optional start date and an optional due date.\n\n![Example of GitLab milestone from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/milestone.png)\nScreenshot shows example milestone in GitLab.\n\n[Issues are a fundamental tool in GitLab](https://docs.gitlab.com/ee/user/project/issues/#issues), and include many components to help users communication information about product problems, new features, and more.\n\n[Merge requests (MRs) are created to merge one branch into another](https://docs.gitlab.com/ee/user/project/merge_requests/). MRs are also where solutions are developed and is a key input to the release planning process.\n\nBoth issues and MRs are core components of a release and allow for the audit and tracking of application changes created by a large group of DevOps engineers, system administrators, and developers. We often use Epics in the release planning process. [Epics are used to track groups of issues with the same theme](https://docs.gitlab.com/ee/user/group/epics/#epics). In the example below, an Epic was created for all the UI-related issues in a project.\n\n![Example of GitLab epic for frontend work](https://about.gitlab.com/images/blogimages/cd-solution-overview/epic.png)\nAn example of an Epic for frontend work in GitLab.\n\n[Iterations are a relatively new tool that allows users to track issues over time](https://docs.gitlab.com/ee/user/group/iterations/#iterations) and helps to track velocity and volatility metrics. Iterations can also be used with milestones and can track a project's sprints using the detailed iterations pages, which include many progress metrics.\n\n![Example iteration from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/iteration.png)\nThis screenshot shows an example of how iterations work in GitLab.\n\nThe [Roadmap tool assembles epics, milestones, and iterations in a timeline format](https://docs.gitlab.com/ee/user/group/roadmap/#roadmap), which makes it easier to visually track all progress toward a release and helps the user streamline the release process.\n\n![Example of roadmap from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/roadmap.png)\nThis screenshot shows an example of roadmap in GitLab.\n\nGitLab offers many approval gates for your release. Set a [deploy freeze window](https://docs.gitlab.com/ee/ci/environments/deployment_safety.html) to temporarily suspend automated deployments to production. The deploy freeze window prevents unintended production releases during a particular time frame to help reduce uncertainty and risk of unscheduled outages.\n\n![Example of deploy freeze window from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/freeze.png)\nThis screenshow shows an example deploy freeze window in GitLab.\n\nRelated to the deploy freeze window, users can protect the production environment for a release to prevent unintentional releases. Deploy freeze windows protect the production environment by specifying who is allowed to deploy to the environment. 
Assigning specific roles and responsibilities streamlines the approval gates and release process.\n\n![protected-env](https://about.gitlab.com/images/blogimages/cd-solution-overview/protected-env.png)\n\nWhen it's ready, the [user can create the release which automatically generates the release evidence](https://docs.gitlab.com/ee/api/releases/#collect-release-evidence). This streamlined process helps reduce release cycle times.\n\n![Example of release evidence from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/release-evidence.png)\nHere is an example of release evidence from a demo project in GitLab.\n\n## Implement continuous delivery\n\nThe capabilities described above help to establish some best practices for software continuous delivery. In this next phase of the CD cycle, every change is automatically deployed to the User Acceptance Testing env/Staging (with a manual deployment to production). In this scenario, there is no need for a deploy freeze, and the release manager can cut a release from staging at any point in time.\n\n[GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) helps users automatically create the release pipeline and relieves them from manually creating a pipeline. With Auto DevOps, users can automatically deploy to the staging environment and manually deploy to production and enable canary deployments. Auto DevOps, which is based on DevOps best practices, helps you streamline the release process.\n\n![Example of enabling Auto DevOps from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/enable-auto-devops.png)\nHow to enable Auto DevOps in GitLab.\n\nThe first job in Auto DevOps is the build job, as shown below:\n\n![build-job](https://about.gitlab.com/images/blogimages/cd-solution-overview/build-job.png)\nThe build job in GitLab Auto DevOps.\n\nThe build job applies the appropriate build strategy to create a Docker image of the application and stores it in the built-in Docker Registry.\n\n![Example of container registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/container-registry.png)\nSee the example of a container registry in GitLab.\n\nFaster and more reliable releases happen when you have build components like Docker images that are consistent, uniform, and readily available throughout the release process. GitLab also includes a built-in [Package Registry](https://docs.gitlab.com/ee/user/packages/) that supports many packaging technologies.\n\n![Example of package registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/package-registry.png)\nHere's what the package registry looks like in GitLab.\n\n[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps) allow the user to visualize what features will go into production. As updates are made to the application via MRs, the MRs kick off Review Apps, which streamlines the review process, including the automatic creation and destruction of an ephemeral review environment. Using Review Apps, stakeholders can verify the updates to the application before the changes are merged to the main line. 
[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps) allow the user to visualize what features will go into production. As updates are made to the application via MRs, the MRs kick off Review Apps, which streamline the review process, including the automatic creation and destruction of an ephemeral review environment. Using Review Apps, stakeholders can verify the updates to the application before the changes are merged to the main line. Review Apps help increase code quality, reducing the risk of unexpected production outages.\n\n![Example Review Apps from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/review-apps.png)\nAn example of Review Apps in GitLab from a demo project.\n\nOnce an application is built and passes automated tests, checks, and verifications, the Auto DevOps pipeline automatically stands up a staging environment and deploys the application to staging.\n\n![Example staging environment from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/staging-env.png)\nAn example staging environment in GitLab.\n\nAt this point, a user can manually deploy the updated application as a canary deployment to the production environment. In doing so, a user ships features to only a portion of the pod fleet and watches their behavior as users visit the temporarily deployed feature. If everything checks out, the next step is to deploy the feature to production; for example, roll the canary deployment out to 50% of the production pods first. Incremental rollouts lower the risk of production outages and deliver a better user experience and higher customer satisfaction. Advanced deployment techniques, like canary, incremental, and blue-green deployments, also improve development and delivery efficiency and streamline the release process.\n\n![Example incremental rollout from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/rollout.png)\nHow incremental rollout works in GitLab.\n\n![live-env-button](https://about.gitlab.com/images/blogimages/cd-solution-overview/live-env-button.png)\nTo check the running application for integrity, you can click on the \"Open live environment\" button.\n\nClicking this button opens the application in a new browser tab. But what if you run into an application error like the one shown below?\n\n![Example application error from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/app-error.png)\nThis is what an application error will look like in GitLab.\n\nIf you encounter an app error, you could decide to perform a rollback by drilling down into the production environment page and identifying the release that had been running before the last deployment. This page is an auditable sequence of changes that have been applied to the production environment. The rollback process starts with the click of a button. Rollbacks speed up recovery of production in case of failures and lower outage times, which improves the user experience.\n\n![Example rollback from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/rollback.png)\nRollback in GitLab to speed up production recovery.\n\nPipelines usually run automatically, but you can also schedule them. For example, to run a pipeline once a day at midnight so staging has the most recent version of the application each day, go to CI/CD > Schedules. Scheduling pipelines can improve the efficiency of the development life cycle and release processes.\n\n![Example of pipeline scheduling from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-sched.png)\nHow to schedule a pipeline to run in the future.\n\n
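If your pipeline configuration needs to distinguish scheduled runs from ordinary pushes, the predefined `CI_PIPELINE_SOURCE` variable can be checked in `rules`. As a small, hypothetical sketch (the job name and script are placeholders), a job like this would refresh staging only on the nightly schedule:\n\n```yaml\n# Hypothetical job that runs only for scheduled pipelines, such as a nightly schedule.\nrefresh_staging:\n  stage: deploy\n  environment: staging\n  script:\n    - ./deploy.sh staging  # replace with your real deployment command\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"schedule\"\n```\n\n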
While the application is running in production, track how the release is performing and quickly identify and troubleshoot any production issues. There are a few ways to do this. One way is to access the \"Monitoring\" feature for a specific environment to track system and application metrics, such as system and pod memory usage, and the number of cores used. The monitoring view includes markers (a small rocket icon) showing when updates were introduced to the environment, so that fluctuations in the metrics can be correlated to a specific update.\n\n![Example monitoring capabilities from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/monitoring.png)\nExplore monitoring capabilities in GitLab.\n\nMonitoring reduces the time to identify, resolve, and preempt production problems, which lowers the risk of unscheduled outages. It also provides an opportunity to monitor business activity and optimize cloud costs. This type of monitoring is not only useful to release managers but also to DevOps engineers, application operators, and platform engineers.\n\nAnother way to monitor the release is by creating alerts to detect out-of-range metrics, which are visible on the overall operations metrics dashboard as well as on each specific environment window. Alerts can also automatically trigger ChatOps and email messages to appropriate individuals or groups.\n\n![Example alerts from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts.png)\nExample alerts in GitLab.\n\nYou can manage alerts from the [Operations Alerts window](https://docs.gitlab.com/ee/operations/incident_management/alerts.html), a single location from which you can assess and handle alerts, which may include the manual or automatic rollback of a release.\n\n![Example alerts dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts-window.png)\nWhat the alerts dashboard looks like in GitLab.\n\nUsers can track and monitor the release progress through [Value Stream Analytics](https://docs.gitlab.com/ee/development/value_stream_analytics.html#value-stream-analytics-development-guide), where you can check your project or group statistics over time and see how your team improves in the number of new issues, commits, deploys, and deployment frequency. Value Stream Analytics is useful for quickly determining the velocity of a given project. It points to bottlenecks in the development process, allowing management to uncover, triage, and identify the root cause of slowdowns in the software development life cycle.\n\n![Example value stream analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/value-stream.png)\nValue stream analytics in GitLab.\n\nLastly, another way to track and monitor the release is through [Pipeline analytics](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html#pipeline-success-and-duration-charts). Pipeline analytics shows the history of your pipeline successes and failures, as well as how long each pipeline runs. This helps explain the health of your projects and their continuous delivery.\n\n![Example pipeline analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-analytics.png)\nScreenshot shows example pipeline analytics in GitLab.\n\nThe [Operations dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/#operations-dashboard) can contain more than one project and allows users to oversee more than one release. 
This dashboard provides a summary of each project's operational health, including pipeline and alert status.\n\n![Example operations dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/ops-dashboard.png)\nExample of the operations dashboard in GitLab.\n\nRelease managers can also access the [environments dashboard](https://docs.gitlab.com/ee/ci/environments/environments_dashboard.html#environments-dashboard), which provides a cross-project, environment-based view that lets you see the big picture of what is happening in each environment.\n\n![Example environments dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/env-dashboard.png)\nThe environments dashboard in GitLab.\n\nAnother option is to drill down into a specific environment to see all the updates applied to the environment.\n\n![Example production environment dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/prod-env-dashboard.png)\nThe production environment dashboard shows all updates applied to the environment.\n\nAll these dashboards offer operations insights that are necessary to understand how a release is performing in production and quickly identify and troubleshoot any production issues.\n\n## Implement continuous deployment\n\nThe third phase in the journey is continuous deployment, where users can send updates directly to production. Instead of manually triggering deployments, continuous deployment sends changes to production automatically (no human intervention is required). Teams can only achieve continuous deployment once continuous delivery is already in place.\n\nTo introduce a feature to a segment of end users in a controlled manner in production, create [feature flags](/blog/feature-flags-continuous-delivery/). Feature flags help reduce risk and let the user conduct controlled tests and separate feature delivery from customer launch.\n\n![Example feature flag from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/feature-flag.png)\nFeature flags in GitLab.\n\nA project's audit events dashboard records which user introduced a feature flag.\n\n![Example audit events dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/events-dashboard.png)\nScreenshot shows example audit events dashboard in GitLab.\n\nCheck the project's security- and compliance-related items by visiting the [Security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#gitlab-security-dashboards-and-security-center).\n\n![Example security dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/sec-dashboard.png)\nThe security dashboard in GitLab.\n\nThese dashboards help you preempt out-of-compliance scenarios to avoid penalties. They also streamline audits, provide an opportunity to optimize cost, and lower the risk of unscheduled production outages.\n\nWe have reviewed how GitLab can help you make your releases safe, low risk, worry-free, consistent, and repeatable.\n\nWhether you are just starting your journey into DevOps, or already in the midst of implementing DevOps processes, [GitLab's continuous delivery](/solutions/continuous-integration/) can help you every step of the way with capabilities built on DevOps and CD best practices.\n\n## Watch and learn\n\nMore of a video person? 
Tune in below to see GitLab’s continuous delivery solution in action.\n\n\u003Chttps://www.youtube-nocookie.com/embed/L0OFbZXs99U>\n\nFor more information, visit [LEARN@GITLAB](/learn/).\n","engineering",[742,743,9],{"slug":765,"featured":6,"template":699},"cd-solution-overview","content:en-us:blog:cd-solution-overview.yml","Cd Solution Overview","en-us/blog/cd-solution-overview.yml","en-us/blog/cd-solution-overview",{"_path":771,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":772,"content":778,"config":783,"_id":785,"_type":13,"title":786,"_source":15,"_file":787,"_stem":788,"_extension":18},"/en-us/blog/cd-unified-monitor-deploy",{"title":773,"description":774,"ogTitle":773,"ogDescription":774,"noIndex":6,"ogImage":775,"ogUrl":776,"ogSiteName":685,"ogType":686,"canonicalUrls":776,"schema":777},"GitLab's unifiied and integrated monitoring strategies","Learn about GitLab’s unified and integrated monitoring capabilities and advanced deployment strategies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681771/Blog/Hero%20Images/CD-1st-mkt-diff-cover-1275x849.jpg","https://about.gitlab.com/blog/cd-unified-monitor-deploy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's unifiied and integrated monitoring strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-11-23\",\n      }",{"title":773,"description":774,"authors":779,"heroImage":775,"date":780,"body":781,"category":693,"tags":782},[738],"2020-11-23","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nA well integrated and consistent approach to monitoring what is running in production and how it is running can provide not only useful information about the infrastructure and applications but also a feedback loop about how your end users are utilizing your business applications. The ability to visualize what goes into production, what to deploy to production, and who to deploy it to can provide organizations the data to help them select and prioritize capabilities that matter to their customers. In addition, the ability to monitor performance and tracing of deployments allows them to preempt production problems, quickly troubleshoot issues and rollback a release, if needed.\n\nGitLab provides the ability to monitor the performance of a deployment and easily rollback if needed. It also empowers you to choose what to deploy and who to deploy to in production via Feature Flags as well as advanced deployment strategies, like Canary deployments, in a consistent, repeatable, and uniform manner to help make your releases safe, low risk, and worry-free.\n\n\nLet’s first delve into how GitLab provides the capabilities to quickly release, identify production problems and quickly roll back.\n\nFor a release manager, the Environment Dashboard provides a cross-project environment-based view with the big picture of what is going on in each environment:\n\n![environment dashboard](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Env-dashboard.png){: .shadow.medium.center.wrap-text}\n\nThe Environment Dashboard also gives easy access to the CD pipeline. 
In the picture above, clicking on the “blocked” link takes you to the CD pipeline view:\n\n![CD pipeline](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/CD-pipeline.png){: .shadow.medium.center.wrap-text}\n\nFrom the CD pipeline, a release manager can perform a canary deployment and also roll out to production incrementally, for example. The performance job above runs web browser performance tests, determines any degradation or improvement in the measurements, and reports them as shown below:\n\n![webperf errors](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Review-webperf-errors.png){: .shadow.medium.center.wrap-text}\n\nA release manager can take this information into consideration to determine whether or not these errors warrant a rollback of the release from production.\n\n![rollback button](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Rollback-click.png){: .shadow.medium.center.wrap-text}\n\nFrom the production environment window, depicted above, clicking the rollback environment button resets production to its previous working state.\n\nIT teams often run into issues when building and releasing software: without direct user feedback, they often build out too many features, many of which go unused. Without the ability to test in production, IT organizations spend more time on testing, prolonging release cycles, but quality is only marginally improved. Modern IT teams can overcome these issues by using experimentation capabilities, such as feature flags and canary deployments.\n\n![feature flags screen](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/ff-screen.png){: .shadow.medium.center.wrap-text}\n\nGitLab supports Feature Flags as shown above. In the example, the defined feature flag named “prods-in-alpha-order-ff” has three strategies:\n\n- For the production environment: provide the feature to 50% of users, based on their user IDs\n\n- For the staging environment: provide the feature to the users listed in the user list “prods-in-alpha-order-user-list”\n\n- For the review environment: provide the feature to only one user.\n\nFeature Flags can also be combined with canary deployments. For example, in the picture below, the release manager has chosen to release the canary to half of the nodes in production:\n\n![50 percent rollout](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/CD-pipeline-50-percent.png){: .shadow.medium.center.wrap-text}\n\nAnd this combined deployment can be visualized via the deploy board as follows:\n\n![deploy board](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/deploy-board.png){: .shadow.medium.center.wrap-text}\n\nAbove, production has four nodes, two of which are running the new canary deployment, while the other two are still running the current production deployment.\n\nThe combination of canary deployments and feature flags can help gather direct user feedback to determine which features are relevant, so that an IT organization can focus on those features, shorten release cycle times, and deliver higher quality and differentiating value to its users.\n\nLastly, integrated monitoring plays an important role in the feedback loop for these advanced deployment strategies and experimentation systems. 
With GitLab’s unified and integrated monitoring, you can track system and application metrics cluster-wide as well as per pod.\n\n![clusterwide monitoring](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/clusterwide-monitoring.png){: .shadow.medium.center.wrap-text}\n\nIn the picture above, you can see the dashboards that monitor clusterwide metrics. And the picture below shows the dashboards that monitor pod-specific metrics:\n\n![podspecific monitoring](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/podspecific-monitoring.png){: .shadow.medium.center.wrap-text}\n\nGitLab provides the ability to monitor the performance of a deployment and easily rollback if needed. It also empowers you to choose what to deploy and who to deploy to in production via Feature Flags as well as advanced deployment strategies, like Canary deployments, in a consistent, repeatable, and uniform manner to help make your releases safe, low risk, and worry-free.\n\nIf you’d like to see some of GitLab’s unified and integrated monitoring capabilities and advanced deployment strategies in action, watch this [video](https://youtu.be/ihdxpO5rgSc).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\n\n",[108,743,9],{"slug":784,"featured":6,"template":699},"cd-unified-monitor-deploy","content:en-us:blog:cd-unified-monitor-deploy.yml","Cd Unified Monitor Deploy","en-us/blog/cd-unified-monitor-deploy.yml","en-us/blog/cd-unified-monitor-deploy",{"_path":790,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":791,"content":797,"config":806,"_id":808,"_type":13,"title":809,"_source":15,"_file":810,"_stem":811,"_extension":18},"/en-us/blog/considerations-for-going-hybrid-remote",{"title":792,"description":793,"ogTitle":792,"ogDescription":793,"noIndex":6,"ogImage":794,"ogUrl":795,"ogSiteName":685,"ogType":686,"canonicalUrls":795,"schema":796},"What to consider when going hybrid","Hybrid-remote is an alluring alternative to all-remote, but requires careful consideration. Here's what you need to know when making the shift.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681897/Blog/Hero%20Images/san_francisco_skyline_dm.jpg","https://about.gitlab.com/blog/considerations-for-going-hybrid-remote","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What to consider when going hybrid\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Murph\"}],\n        \"datePublished\": \"2021-02-17\",\n      }",{"title":792,"description":793,"authors":798,"heroImage":794,"date":800,"body":801,"category":802,"tags":803},[799],"Darren Murph","2021-02-17","\n\nAs the working world embraces the reality that we aren't going back to old ways of working, a growing chorus of leaders are forecasting a [hybrid-remote](/company/culture/all-remote/hybrid-remote/) future. 
While the allure of this concept is understandable — it seems to present the best of both worlds on paper — a great deal of nuance lurks.\n\n\u003Cblockquote class=\"twitter-tweet tw-align-center\">\u003Cp lang=\"en\" dir=\"ltr\">Sorry to break it to all of the remote-only people, but I think offices will make a comeback.\u003C/p>&mdash; Allison Barr Allen (@abarrallen) \u003Ca href=\"https://twitter.com/abarrallen/status/1349539596242075648?ref_src=twsrc%5Etfw\">January 14, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nIn fact, without great deliberation, care, and intentionality, hybrid can deliver the *worst* of both worlds. If you're charging down this road, you'll want to consider and plan for the points below to minimize dysfunction and the toxic friction of a [two-tier work environment](/company/culture/all-remote/what-not-to-do/).\n\n## Only some days in the office\n\nCompanies that mandate or encourage one or more days per week in-office should be mindful of three important factors:\n\n1. This inhibits team members from considering drastically different living locales, because they still need to be within commutable distance of an office.\n1. This prevents a company's sourcing and recruiting teams from operating any differently than they would at an all-colocated company. New hires will still need to relocate to the general office area, limiting your talent pool.\n1. This will make the process of shifting to remote-first workflows more difficult, as the office will serve as a crutch for collaboration.\n\n## Informal meetings\n\nInformal (or unscheduled and unplanned) meetings in an office can be highly disruptive to hybrid-remote teams. While it may feel efficient to ask someone you see in a hallway for a few minutes of their time, this typically creates disruption in the day of the person you're hailing and leads to undocumented progress. Any progress made in an informal conversation is invisible to those outside of the office *as well as* others in the office who are not invited to the meeting. Unplanned meetings with undocumented results work against the remote-first practice of documenting all work so that others in the organization can contribute.\n\nLeaders should reinforce particular rigor in documenting takeaways after informal meetings so that context is agreed upon and visible to others regardless of their location, and to minimize miscommunication and gossip.\n\n## Redesigned spaces for individual meeting rooms\n\nHybrid calls are also [suboptimal for remote attendees](/company/culture/all-remote/meetings/#avoid-hybrid-calls). We recommend leaders transitioning to hybrid-remote consider redesigning existing office space to optimize for individual workspaces and individual meeting rooms. This reinforces that the office is simply [another venue to work remotely from](/company/culture/all-remote/how-to-work-remote-first/#offices-are-simply-venues-to-work-remotely-from).\n\nBy eliminating conference rooms, a company ensures collaboration is accessible to all and removes the temptation to have in-office team members gather around a single camera for a video call with remote attendees.\n\nLeaders may consider keeping one or two large spaces that can be reserved for team onsites, where entire teams or sub-teams will intentionally travel on specific dates to meet in person (e.g., fiscal year planning, team bonding, etc.). 
It's important to still document outcomes from these gatherings and ensure that 100% of the team is included.\n\n\u003Cblockquote class=\"twitter-tweet tw-align-center\">\u003Cp lang=\"en\" dir=\"ltr\">I have worked from home for most of my 20+ year career and never ever had so many calls and meetings. I&#39;ve kept it to myself for a full year but I cannot anymore: y&#39;all are doing this wrong\u003C/p>&mdash; Amy Westervelt (@amywestervelt) \u003Ca href=\"https://twitter.com/amywestervelt/status/1353902805048647686?ref_src=twsrc%5Etfw\">January 26, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n## Agendas upfront\n\nThe most functional hybrid organizations operate [remote-first](/company/culture/all-remote/how-to-work-remote-first/). This ensures that business continues even if 100% of the workforce opts to work remotely, outside of the office, on any given day. A key part of reinforcing this mindset is the mandate that all work meetings have an upfront agenda.\n\nPractically speaking, this means that all in-office meeting invites have a shared agenda document attached, so that others can read, learn, and contribute regardless of their location (or even if they're awake and available during the meeting time). This process ensures that a [live doc meeting](/company/culture/all-remote/live-doc-meetings/) procedure happens even for onsite meetings.\n\nThis is critical for process continuity regardless of where a team member is located. In a hybrid organization, you will have team members who conduct onsite meetings some days, and remote meetings on other days. It's vital that the *process* of those meetings are the same – it's merely the physical position of a team member that changes.\n\n## Coffee chats should be indiscriminate of location\n\n[Coffee chats](/company/culture/all-remote/informal-communication/#coffee-chats) are an excellent way to broaden one's perspective and meet new people from across the organization. Hybrid organizations should take care to not enable selective coffee chat pairing based on who is onsite and who is remote, as it signals a two-tier work environment.\n\n## Record important conversations\n\nThe proximity of people in an office makes hallway, watercooler, and ad hoc conversations appealing. Leaders in hybrid-remote settings should reinforce the importance of using a smartphone as a recording device to capture important, non-confidential work-related conversations, with the consent of both parties. Recording conversations ensure that takeaways can be shared transparently with those outside of the office and minimizes potential misinterpretations.\n\n\u003Cblockquote class=\"twitter-tweet tw-align-center\">\u003Cp lang=\"en\" dir=\"ltr\">Want to make hybrid work? Start at the top.\u003Cbr> \u003Cbr>People want flexibility, a remote-office blend. 
But allowing flexibility without addressing how execs work risks “faux flex.”\u003Cbr>\u003Cbr>Changing where &amp; how senior execs show up will make or break hybrid.\u003Ca href=\"https://twitter.com/hashtag/futureofwork?src=hash&amp;ref_src=twsrc%5Etfw\">#futureofwork\u003C/a>\u003Ca href=\"https://t.co/H7obOrKlHl\">https://t.co/H7obOrKlHl\u003C/a>\u003C/p>&mdash; Brian Elliott (@brianpelliott) \u003Ca href=\"https://twitter.com/brianpelliott/status/1353744550724943872?ref_src=twsrc%5Etfw\">January 25, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n## Leadership's place in the office\n\nThe best place for leaders and executives to be in a hybrid-remote environment is *[outside](/company/culture/all-remote/transition/#make-the-executive-team-remote)* of the office.\n\n1. This prevents remote team members from a perceived lack of \"face time\" with executives.\n1. This prevents senior leadership from conducting their work in ways that are counter to remote-first principles.\n1. This prevents cognitive dissonance from leadership on what tools, technologies, and training need to be prioritized to support remote-first workflows.\n1. This prevents team members from coming to the office to rub shoulders with executives.\n1. This reinforces that the office is no longer the [epicenter](/company/culture/all-remote/stages/#7-remote-first) of power or decision making.\n\n## Spontaneous social events\n\nIt's understandable for team members to want to gather socially in and around office settings. Structuring [informal communication](/company/culture/all-remote/informal-communication/) is vital in a remote setting, and some companies may choose to repurpose some of their office space to accommodate groups and gatherings. Libraries, fitness centers, game rooms, and music studios (among others) could be created to facilitate social gatherings for those who are onsite on any given day.\n\nLeaders who enable this should be mindful of the following:\n\n1. It's important to budget for travel to include remote team members in onsite social events.\n1. 
Work should not happen in social rooms, because it hinders [transparency](https://handbook.gitlab.com/handbook/values/#transparency) and creates [dysfunction](https://handbook.gitlab.com/handbook/values/#five-dysfunctions) by forming communication silos.\n\n\u003Cblockquote class=\"twitter-tweet tw-align-center\">\u003Cp lang=\"en\" dir=\"ltr\">&quot;Relative to expectations, how has work from home turned out?&quot;\u003Cbr>\u003Cbr>Expansive research on work-from-home from \u003Ca href=\"https://twitter.com/Stanford?ref_src=twsrc%5Etfw\">@Stanford\u003C/a>, \u003Ca href=\"https://twitter.com/ChicagoBooth?ref_src=twsrc%5Etfw\">@ChicagoBooth\u003C/a>, \u003Ca href=\"https://twitter.com/ITAM_mx?ref_src=twsrc%5Etfw\">@ITAM_mx\u003C/a>, and \u003Ca href=\"https://twitter.com/Jose_MariaRD?ref_src=twsrc%5Etfw\">@jose_mariard\u003C/a> 🌎\u003Cbr>\u003Cbr>(Some well-considered comments in the \u003Ca href=\"https://twitter.com/newsycombinator?ref_src=twsrc%5Etfw\">@newsycombinator\u003C/a> thread as well)\u003Ca href=\"https://t.co/gvanMImy5Y\">https://t.co/gvanMImy5Y\u003C/a> \u003Ca href=\"https://t.co/Ig1X2PDBQH\">pic.twitter.com/Ig1X2PDBQH\u003C/a>\u003C/p>&mdash; Darren Murph (@darrenmurph) \u003Ca href=\"https://twitter.com/darrenmurph/status/1353879546358095873?ref_src=twsrc%5Etfw\">January 26, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n## Equitable benefits and perks\n\nLeaders should carefully evaluate spoken and unspoken perks of the office, and seek to extend equal benefits to those outside of the office. For example, access to an onsite daycare and fitness center would demand a childcare and fitness credit for those who are remote by default. This situation becomes particularly tricky for team members who are onsite some days of the week, and offsite others, unless the credits are extended to all.\n\n## Expect rapid iteration\n\nHybrid-remote organizations may see high office use in the early days of a workplace transition, as people flock to the familiar. However, as remote-first workflows are implemented and people relocate or change their workplace setting for personal reasons, it's possible that more space will go unused.\n\nWhile this may seem jarring, it's a positive indicator that work and culture are progressing without the need of an office. 
This will create opportunities to capture greater real estate savings and/or repurpose office space for philanthropic efforts, such as opening up an internship center for the local community.\n\nTo assist with the transition, enroll in our \"[How to Manage a Remote Team](https://www.coursera.org/learn/remote-team-management)\" course on Coursera, and download [GitLab's Remote Playbook](https://learn.gitlab.com/suddenlyremote).\n","culture",[804,805,9],"remote work","careers",{"slug":807,"featured":6,"template":699},"considerations-for-going-hybrid-remote","content:en-us:blog:considerations-for-going-hybrid-remote.yml","Considerations For Going Hybrid Remote","en-us/blog/considerations-for-going-hybrid-remote.yml","en-us/blog/considerations-for-going-hybrid-remote",{"_path":813,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":814,"content":820,"config":826,"_id":828,"_type":13,"title":829,"_source":15,"_file":830,"_stem":831,"_extension":18},"/en-us/blog/container-security-in-gitlab",{"title":815,"description":816,"ogTitle":815,"ogDescription":816,"noIndex":6,"ogImage":817,"ogUrl":818,"ogSiteName":685,"ogType":686,"canonicalUrls":818,"schema":819},"Get better container security with GitLab: 4 real-world examples","Containers are increasingly popular – and increasingly vulnerable. Using\nfour threat scenarios, we step through how GitLab's built-in security\nfeatures will make containers safer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/container-security-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get better container security with GitLab: 4 real-world examples\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thiago Figueiró\"}],\n        \"datePublished\": \"2020-07-28\",\n      }",{"title":815,"description":816,"authors":821,"heroImage":817,"date":823,"body":824,"category":695,"tags":825},[822],"Thiago Figueiró","2020-07-28","The number of web applications hosted in containers grows every day, but\ndata from our 2020 Global DevSecOps Survey showed a majority of companies\ndon't have a [container\nsecurity](/topics/devsecops/beginners-guide-to-container-security/) strategy\nin place. This post shows examples of how GitLab can help increase the\nsecurity of such applications and their hosting environment. We focus on web\napplications, but most of the security features described in this post apply\nto any containerized apps.\n\n\nDetailed descriptions and examples of the tactics and techniques mentioned\nin this post can be found in the [MITRE ATT&CK\nMatrix](https://attack.mitre.org/).\n\n\n## Threat Models\n\n\nTo help with our scenarios, we're taking two tactics from the MITRE ATT&CK\nmatrix: [Initial Access](https://attack.mitre.org/tactics/TA0001/) and\n[Execution](https://attack.mitre.org/tactics/TA0002/). There are similar\ncategories in other frameworks, such as the [cyber kill\nchain](https://en.wikipedia.org/wiki/Kill_chain#The_cyber_kill_chain).\n\n\n### Initial Access\n\n\nIn this phase, an attacker is attempting to establish access to your\ncomputing resources through different techniques. 
A single one might be\nsufficient for the attack to succeed but, quite often, a successful\ncompromise relies on a few different methods.\n\n\nThe diagram below shows three examples of how an attacker can gain access to\na container hosting an application accessible from the Internet.\n\n\n```mermaid\n\ngraph LR\n  classDef default fill:#FFFFFF,stroke:#0C7CBA;\n  classDef baddie fill:#ffd6cc,stroke:#991f00;\n\n  subgraph Kubernetes Cluster\n    subgraph Container\n      subgraph Application\n        Accounts[Valid\u003Cbr>Accounts]\n        click Accounts \"https://attack.mitre.org/techniques/T1078\"\n        style Accounts fill:#FFFFFF,stroke:#0C7CBA;\n\n        Dependencies[External\u003Cbr>Dependencies]\n        click Dependencies \"https://attack.mitre.org/techniques/T1195\"\n        style Dependencies fill:#FFFFFF,stroke:#0C7CBA;\n\n        Service[Network\u003Cbr>Service]\n        click Service \"https://attack.mitre.org/techniques/T1190\"\n        style Service fill:#FFFFFF,stroke:#0C7CBA;\n      end\n    style Application fill:#fff,stroke:#cccccc;\n  end\n  style Container fill:#f0f0f5,stroke:#cccccc;\n  end\n\n  Attacker -- Supply chain attack --> Dependencies\n  Attacker -- Exploit --> Service\n  Attacker -- Exposed Credentials --> Accounts\n\n  class Attacker baddie\n\n```\n\n\nThere are different ways threat vectors can be exploited but, to demonstrate\nGitLab's features, let's pick some specific examples of how it can happen.\nNone of these are made-up by the way; they have all happened - and continue\nto happen - in the wild.\n\n\n1. **Exposed Credentials**. Someone with legitimate access to your systems\nsaved valid account credentials in an application's code repository.\n\n1. **Supply Chain Attack**. There's no apparent vulnerability in the\napplication itself but the attacker managed to introduce one in an external\ndependency utilized by the application, so now it, too, is vulnerable.\n\n1. **Exploit**. The application is vulnerable to command execution because\nit doesn't validate user input properly.\n\n\n### Execution\n\n\nAt this point, the attacker has:\n\n\n1. Acquired credentials that allow access to most areas of the web\napplication.\n\n1. Discovered that the application is vulnerable to remote code execution.\n\n1. Introduced a different vulnerability to the application via an external\ndependency.\n\n\nThe next objective is to use one or more of these assets to execute\ninstructions of their choice on the target systems. The diagram below shows\ndifferent ways this can be accomplished.\n\n\n```mermaid\n\ngraph LR\n  classDef default fill:#FFFFFF,stroke:#0C7CBA;\n  classDef cl-container fill:#f0f0f5,stroke:#cccccc;\n  classDef baddie fill:#ffd6cc,stroke:#991f00;\n\n  subgraph Infrastructure\n    subgraph Container\n      Application\n      Others\n      Exploit[Executable Exploit]\n      Shell[Reverse Shell]\n\n      Application -- Deliver, Execute --> Exploit\n      Application -- Execute --> Shell\n      Others[Other\u003Cbr>Techniques] -- Deliver, Execute --> Exploit\n      Exploit -- Modify --> Filesystem\n      Exploit -- Spawn --> Shell\n    end\n\n    subgraph Containers\n      Internal(Internal Service)\n    end\n    Exploit -- Lateral Movement --> Internal\n    class Container,Containers cl-container\n  end\n\n  Shell -- Internet --> Attacker\n\n  class Attacker,Exploit,Others,Shell baddie\n\n```\n\n\nAgain we're choosing scenarios that fit our examples.\n\n\n1. **Deliver**, **Execute**. 
The attacker has an exploit that they would\nlike to deliver and execute.\n   1. The vulnerable application is tricked into writing arbitrary content to the container file system.\n   1. The vulnerable application is tricked into executing arbitrary commands.\n   1. The external dependency provides another, unspecified way to deliver and execute malicious code.\n1. **Spawn**. Execution of malicious code spawns a [reverse\nshell](https://en.wikipedia.org/wiki/Shell_shoveling) that connects to the\nattacker and waits for commands.\n\n1. **Modify**. The malicious code modifies configurations on the container's\nfile system, further exposing the container to attack or, perhaps,\nescalating the attacker's privileges.\n\n1. **Lateral Movement**. The attacker's exploit probes other hosts in the\ncontainer's network, managing to find and access an internal service that\nwasn't exposed to the Internet in the first place.\n\n\n## How GitLab Helps Stop These Attacks\n\n\nAs part of the [Secure](https://about.gitlab.com/direction/secure/) and\n[Protect](https://about.gitlab.com/direction/govern/) Stages, GitLab has\ndelivered and continues to improve features that minimize your security risk\nand help you [shift security\nleft](/blog/efficient-devsecops-nine-tips-shift-left/).\n\n\nLet's see how these GitLab features would prevent and detect the attacks\ndescribed in our example scenarios.\n\n\n### Initial Access\n\n\nBy [shifting left](/blog/toolchain-security-with-gitlab/), all techniques in\nthis phase could be detected even before the application was deployed to an\nInternet-accessible environment.\n\n\nThis is done by taking advantage of [GitLab\nSecure](https://docs.gitlab.com/ee/user/application_security/) features as\npart of an application's [Continuous Integration\n(CI)](https://docs.gitlab.com/ee/ci/) builds.\n\n\n#### Exposed Credentials\n\n\nA [Secret\nDetection](https://docs.gitlab.com/ee/user/application_security/secret_detection/)\nscan reports several types of secrets accidentally or intentionally\ncommitted to your code repository, allowing the merge request author to\nremove and invalidate the exposed secret before it can be used in an attack.\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/W2tjcQreDwQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n#### Supply Chain Attack\n\n\nOne type of supply chain attack is against the open-source code libraries\nused by your application. [Dependency\nScanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\nreports known vulnerabilities in dependencies used by your application.\nScanners for multiple languages are available and kept up-to-date with a\ndatabase of known vulnerabilities so that potential vulnerabilities are\nidentified and reported as part of your CI builds.\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uGhS2Wh6PBE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n
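Both scanners ship as CI templates, so wiring them into a pipeline is\ntypically a matter of a few lines of `.gitlab-ci.yml`. A minimal sketch,\nassuming the documented template names (extra job variables and an\nappropriate GitLab tier may be needed in practice):\n\n```yaml\n# Minimal sketch: add Secret Detection and Dependency Scanning jobs to a\n# pipeline by including the documented security templates.\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml\n  - template: Security/Dependency-Scanning.gitlab-ci.yml\n```\n\n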
#### Exploit\n\n\nFor the examples given in this category, there are two ways GitLab mitigates\nand prevents the described attacks. The first is [Dynamic Application\nSecurity Testing\n(DAST)](https://docs.gitlab.com/ee/user/application_security/dast/), another\nscanner that can be run as a CI job. The second way is through the GitLab\nWeb Application Firewall (WAF), part of our [Protect\nStage](/handbook/engineering/development/sec/govern/).\n\n\nBecause DAST executes against a running deployment of your application, it\ndetects potential problems that can't be discovered by merely analyzing an\napplication's source code. In our example, the attacker relies on an input\nvalidation weakness in the application that might be identified and reported\nas a [server side code\ninjection](https://www.zaproxy.org/docs/alerts/90019/) by DAST.\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/wxcEiuUasyM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\nEffective security is implemented in layers and, should DAST fail to\nidentify a vulnerability, we can sometimes rely on the WAF to block malicious\nrequests to the application.\n\n\nA WAF can monitor and block web traffic based on a set of pre-configured\nrules that determine if a request is potentially malicious or a response\nindicates compromised security. GitLab's WAF comes with the [OWASP\nModSecurity Core Rule\nSet](https://owasp.org/www-project-modsecurity-core-rule-set/) installed by\ndefault, which will successfully prevent various forms of [shell\ninjection](https://github.com/coreruleset/coreruleset/blob/7776fe23f127fd2315bad0e400bdceb2cabb97dc/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf#L415)\nand [SQL\ninjection](https://github.com/coreruleset/coreruleset/blob/v3.4/dev/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf)\nattacks.\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/03n4C60YnDQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n### Execution\n\n\nIf the previous counter-measures have failed to prevent initial access\nto our system, we have another layer of defense against attacks. Even after\na vulnerable application is deployed to a publicly accessible environment,\nwe can still detect and prevent cyberattacks.\n\n\n#### Detection\n\n\nIn our examples, the attacker modified the container filesystem and created\nnew processes by executing malicious code. These actions can be detected and\nlogged, as shown in the demonstration video below. 
Additionally, the logs\ncan be sent to a SIEM with Gitlab's [SIEM\nintegration](https://docs.gitlab.com/ee/update/removals.html), enabling a\nsecurity operations team to be notified of the suspicious activity within\nseconds of it happening.\n\n\nAs part of our [Container Host\nSecurity](https://about.gitlab.com/direction/govern/) features, you can\n[enable logging of system\ncalls](https://docs.gitlab.com/ee/update/removals.html) on any containers in\nyour [Kubernetes\ncluster](https://docs.gitlab.com/ee/user/project/clusters/).\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WxBzBz76FxU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n#### Prevention\n\n\nGitLab is able to prevent all attack examples described earlier: Lateral\nMovement, Reverse Shell, filesystem modification, and malicious code\nexecution attacks.\n\n\nBy deploying a [Network\nPolicy](https://docs.gitlab.com/ee/topics/autodevops/stages.html#network-policy)\nto your Kubernetes cluster, the compromised container would not be allowed\nto create an outbound connection to the attacker through the Internet.\nSimilarly, the Executable Exploit would be prevented from probing other pods\nin a cluster network due to policy restrictions.\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pgUEdhdhoUI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\nTo prevent filesystem modification and restrict code execution, [Pod\nSecurity\nPolicies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)\n[are supported](https://docs.gitlab.com/ee/update/removals.html) as part of\nour Container Host Security features.\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/fPy53c3rbAs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n## Conclusion\n\n\nThe number of container-based applications will continue to grow along with\nthe necessity to secure them, and our new [Container Host\nSecurity](/direction/govern/) category is part of the GitLab strategy to\nenable organizations to proactively protect their cloud-native environments.\n\n\nIn this blog post, we highlighted only a few of the DevSecOps features\ncurrently available in GitLab. 
For additional existing and upcoming\nfunctionality, please visit the product direction pages for\n[Protect](/direction/govern/) and [Secure](/direction/secure/).\n\n\nCover image by [JJ Ying](https://unsplash.com/@jjying) on\n[Unsplash](https://unsplash.com).\n\n{: .note}",[720,695,9],{"slug":827,"featured":6,"template":699},"container-security-in-gitlab","content:en-us:blog:container-security-in-gitlab.yml","Container Security In Gitlab","en-us/blog/container-security-in-gitlab.yml","en-us/blog/container-security-in-gitlab",{"_path":833,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":834,"content":840,"config":848,"_id":850,"_type":13,"title":851,"_source":15,"_file":852,"_stem":853,"_extension":18},"/en-us/blog/contributing-to-gitlab-with-ease",{"title":835,"description":836,"ogTitle":835,"ogDescription":836,"noIndex":6,"ogImage":837,"ogUrl":838,"ogSiteName":685,"ogType":686,"canonicalUrls":838,"schema":839},"Contributing to GitLab with ease","Everyone can contribute to GitLab, so here are a few tips to make your experience easy and pleasant.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678997/Blog/Hero%20Images/mergerequestsgame.jpg","https://about.gitlab.com/blog/contributing-to-gitlab-with-ease","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributing to GitLab with ease\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lin Jen-Shin\"}],\n        \"datePublished\": \"2018-08-23\",\n      }",{"title":835,"description":836,"authors":841,"heroImage":837,"date":843,"body":844,"category":845,"tags":846},[842],"Lin Jen-Shin","2018-08-23","\nAs a [Merge Request Coach](https://handbook.gitlab.com/job-families/expert/merge-request-coach/), I am happy to\nhelp community contributors feel comfortable when contributing\nto GitLab. During my time reviewing merge requests, I’ve learned a bit about\nhow it feels contributing to GitLab as a newcomer, and I’d like to share\nmy learnings with you.\n\n## Common issues in an MR (merge request)\n\nIn the past, I think styling might have been one of the most common issues.\nHowever, we’re improving our CI to run more static analysis, so these issues\nare now automatically pointed out. Today, contributors can easily see what\ndidn’t pass CI, and they can fix the issues very quickly, so this is not as\ncommon as it was in the past.\n\nThe biggest issue today might be that many contributors don’t add tests, since\ntests often require much more effort than fixing or adding something. If\nyou’re struggling with adding tests, please don’t worry. Merge request coaches\ncan tell you how to add tests when we see your contribution, and we’ll work\nthrough it together.\n\n## Best practices\n\n1. If you only remember one best practice, I hope it is to keep this\nreference handy when [contributing to GitLab](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/development/contributing/index.md).\nI know it’s super long, but it has all the information you need when it comes\nto making contributions to GitLab.\n\n2. Get [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit) set up\nlocally if you haven’t already. Running tests locally is the best way to\ndevelop and debug, and I highly encourage that you incorporate this into your\nworkflow.\n\n3. Don’t ignore CI. If your pipeline didn’t pass, it’s important to go back and\nidentify the problem. 
Troubleshooting issues is a great way to practice your\nskills and help you learn from mistakes.\n\n4. Look at the [GitLab team page](/company/team/) and pick a merge request coach to\nping if you need help. Merge request coaches guide contributors and will even\njump in to help finish an MR if a contributor can no longer work on it,\nensuring that the attribution stays with the original contributor. Our goal is\nto help everyone feel comfortable and empowered to contribute, even with the\nsmallest possible effort. Coaches have other responsibilities and don’t always\nproactively look for contributors who need help, so ping them if you’re stuck\nor ready for a review. If they’re not the right person to ping, they’ll pass\nyou over to the right one. We love helping community contributors, and we look\nforward to guiding and working with you.\n\n## Little-known features\n\nWe [recently welcomed](/blog/introducing-gitlab-s-integrated-development-environment/)\nthe Web IDE, which lets you quickly edit multiple files on the web without cloning\nthe whole repository. Web IDE is useful if you just want to make some small\nchanges online. If you’d like to learn more about Web IDE, please\nhead over to our [documentation](https://docs.gitlab.com/ee/user/project/web_ide/).\n\nSince GitLab's development velocity is pretty high, conflicts can\nhappen quite frequently. Did you know that you can resolve conflicts directly\nfrom the web UI? I really love this feature, because it’s very easy to resolve\nsimple conflicts, and I don’t need to launch my editor or Git to pull, merge,\nand push. With a few simple clicks, I can save a lot of time on simple\nconflicts.\n\n## What everyone should know about MRs\n\nTo me, an MR is a tool to interactively develop and explore with other people.\nDon’t worry about being perfect in the first version of your MR. We learn\nthrough our mistakes and get better over time.\n\nIf you’ve made tons of contributions, we invite you to join our\n[core team](/community/core-team/) or apply for a [full-time position](/jobs/) at GitLab.\nThe MR is one of the most important ways we work together, and we’d love to\ncollaborate with you.\n\n## What to do if you’re struggling\n\nIf you’re having some trouble getting the hang of merge requests, I suggest\ntaking a look at how others work on their MRs. Following other people’s example\ncan help you understand what they did and why they did it. Reaching out to a\nmerge request coach, joining discussions, and reviewing others’ code are also\nways to help you get up to speed. I think that interacting with others is a\ngreat way to learn and improve.\n\n## We’d love your contributions!\n\nWe really enjoy collaborating with community contributors, and we look forward\nto working together. If you don't know what you can contribute, please take a\nlook at [`Accepting merge requests`](https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name[]=Accepting+merge+requests).\nWe use this label to explicitly call out issues that we won’t schedule\nanytime soon but that we still want. These issues usually have very clear scopes,\nso they often just require a simple implementation. 
They’re nice targets if\nyou don’t know what to contribute but want to gain experience.\n\nIf you would like to see how we handle community contributions, please take a\nlook at [`Community contribution`](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests?label_name[]=Community%20contribution).\nWe put this label on all community contributions, therefore you can easily\nfind all the past and current community contributions. We look forward to\nyour future contributions as well!\n\n[Cover image](https://unsplash.com/photos/vqDAUejnwKw) by\n[Victor Freitas](https://unsplash.com/@victorfreitas), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n","open-source",[268,847,9,696,721],"collaboration",{"slug":849,"featured":6,"template":699},"contributing-to-gitlab-with-ease","content:en-us:blog:contributing-to-gitlab-with-ease.yml","Contributing To Gitlab With Ease","en-us/blog/contributing-to-gitlab-with-ease.yml","en-us/blog/contributing-to-gitlab-with-ease",{"_path":855,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":856,"content":862,"config":871,"_id":873,"_type":13,"title":874,"_source":15,"_file":875,"_stem":876,"_extension":18},"/en-us/blog/deploying-application-eks",{"title":857,"description":858,"ogTitle":857,"ogDescription":858,"noIndex":6,"ogImage":859,"ogUrl":860,"ogSiteName":685,"ogType":686,"canonicalUrls":860,"schema":861},"Deploying apps to GitLab-managed Amazon EKS with Auto DevOps","A Kubernetes tutorial: Use GitLab AutoDevOps to deploy your applications to Amazon EKS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666959/Blog/Hero%20Images/gitlab-aws-cover.png","https://about.gitlab.com/blog/deploying-application-eks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy your application to a GitLab-managed Amazon EKS cluster with Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2020-05-05\",\n      }",{"title":863,"description":858,"authors":864,"heroImage":859,"date":866,"body":867,"category":762,"tags":868},"How to deploy your application to a GitLab-managed Amazon EKS cluster with Auto DevOps",[865],"Abubakar Siddiq Ango","2020-05-05","\n\nDeploying an application onto Amazon EKS doesn't have to be painful. In fact, GitLab's [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) function makes it easy for developers to deploy applications from GitLab onto any cloud. In this tutorial, I break down how to deploy a simple ruby Hello, World application onto our GitLab-managed Amazon EKS cluster, which we created earlier ([read part one to learn how](/blog/gitlab-eks-integration-how-to/)). For the tutorial, I integrated GitLab with Amazon EKS in a GitLab group I created purposely for this, so all the projects created in the group can use the integration without any extra configuration. \n\nIn the previous blog post, we saw how seamless it is to create a Kubernetes cluster on Amazon EKS in GitLab with the right permissions. Developer productivity is greatly improved because there is no more need to manually set-up clusters and the same cluster can be used for multiple projects when Amazon EKS is integrated with GitLab at the group and instance levels, thus making onboarding new projects a breeze.\n\nIn this tutorial, we will be deploying a simple ruby Hello World application to our GitLab-managed Amazon EKS cluster. 
For the purpose of this tutorial, I have integrated GitLab with Amazon EKS at the group level on a group I own on GitLab.com; this way, all projects created in the group can make use of the integration with no extra configuration.\n\n## A few things to note about Auto DevOps\n\nAuto DevOps provides pre-defined [CI/CD configuration](/topics/ci-cd/), which allows you to automatically detect, build, test, deploy, and monitor your applications. All you need to do is push your code and GitLab does the rest, saving you a lot of effort to set up the workflow and processes required to build, deploy, and monitor your project.\n\nYou'll need to execute the following steps for GitLab Auto DevOps to work seamlessly:\n\n* A [base domain](https://docs.gitlab.com/ee/user/project/clusters/#base-domain) name needs to be provided on GitLab’s integration page for Amazon EKS.\n\n ![AutoDevOps Base Domain](https://about.gitlab.com/images/blogimages/deploying-application-eks/base-domain.png){: .shadow.medium.center}\n Setting the base domain for Auto DevOps\n{: .note.text-center}\n\n* GitLab creates a subdomain for every project that is deployed, using the project slug, the project ID, and the base domain name. For example, the link `https://abubakar-te-demos-minimal-ruby-app-2.eksdemo-project.gitlabtechevangelism.net/` is automatically created, where `abubakar-te-demos-minimal-ruby-app` is the project slug and `2` is the project ID, both prepended to the base domain name, `eksdemo-project.gitlabtechevangelism.net`.\n\n* Create a wildcard A record for the base domain in the public hosted zone of your domain name on Route 53 and point it to the Ingress endpoint created during the integration. Selecting the ALIAS option in Route 53 will present a list of resources you have already created. You will see your Ingress endpoint in the list of elastic load balancers. Alternatively, you can copy and paste it from GitLab’s integration page.\n\n ![Route53 Alias for base Domain](https://about.gitlab.com/images/blogimages/deploying-application-eks/route53.png){: .shadow.small.center}\n Set up an alias for the base domain using the generated Ingress endpoint.\n{: .note.text-center}\n\n* Install the pre-defined Kubernetes certificate management controller, cert-manager, on the GitLab EKS integration page to ensure every URL created for your application has a Let’s Encrypt certificate.\n\n## Now, let's deploy our application\n\n### How to set up the project\n\nIt takes five simple steps to set up the project for your application.\n\nFirst, create a GitLab project from an existing sample, in this case, GitLab’s Auto DevOps example called Minimal Ruby App. There is nothing special about this application; it's just a Ruby application you can use to try out the integration. If you integrated Amazon EKS at the group level on GitLab, you can just go ahead and create the project in the group. At the project level, you will have to perform the integration after creating the project.\n\nNext, copy the URL from the “Clone with HTTPS” field of the sample project, Minimal Ruby App:\n\n  ![Cloning over HTTPS](https://about.gitlab.com/images/blogimages/deploying-application-eks/https-clone.png){: .shadow.small.center}\n  Cloning the sample project.\n{: .note.text-center}\n\nThird, click the \"import project\" tab on the new project page, then click on the \"repo by URL\" button. 
Paste the URL you copied earlier in the text box for \"Git repository URL\" and click on \"create project\"\n\n  ![Importing Project](https://about.gitlab.com/images/blogimages/deploying-application-eks/import-project.png){: .shadow.medium.center}\n  The progress of the sample project import.\n  {: .note.text-center}\n\nNext, the project will be imported and all the files from the sample will be available in your new project.\n\n  ![Project import progress](https://about.gitlab.com/images/blogimages/deploying-application-eks/import-progress.png){: .shadow.medium.center}\n  The project import is completed.\n  {: .note.text-center}\n\nFinally, go to project settings > CI/CD > Auto DevOps and enable “Default to Auto DevOps pipeline”\n\n  ![Project Settings](https://about.gitlab.com/images/blogimages/deploying-application-eks/project-settings.png){: .shadow.medium.center}\n  Enable the Auto DevOps pipeline.\n  {: .note.text-center}\n\n### How to deploy your application\n\n* Now a pipeline is created and the project built, tested and deployed to production using the [default AutoDevOps CI files](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml).\n\n  ![Project Pipeline](https://about.gitlab.com/images/blogimages/deploying-application-eks/pipeline.png)\n  The first Auto DevOps pipeline.\n  {: .note.text-center}\n\n* Look inside the pipeline output to see the \"deployment to production\" line. This is where the URL is to access your application.\n\n  ![Deployment to production](https://about.gitlab.com/images/blogimages/deploying-application-eks/production-deploy.png)\n  Next, link to the deployed application.\n  {: .note.text-center}\n\n* In the image above, you can see the application has been deployed and can be accessed at `https://abubakar-te-demos-minimal-ruby-app-1.eksdemo-project.gitlabtechevangelism.net/`\n\nAnd it should show a “Hello World” message:\n\n  ![Deployed Application](https://about.gitlab.com/images/blogimages/deploying-application-eks/hello-world.png){: .shadow.medium.center}\n  The deployed application with \"Hello World\" message.\n  {: .note.text-center}\n\n## How to make changes to the deployed application\n\nIf any new changes are pushed, a different set of jobs is run to build, test, and review the changes before they can be merged to the master branch. 
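All of those jobs come from GitLab's default Auto DevOps template, so if you prefer to keep this configuration in the repository rather than relying on the project-settings toggle, you can include the template from a `.gitlab-ci.yml` file and tune it with CI/CD variables. A minimal sketch, assuming the documented `Auto-DevOps.gitlab-ci.yml` template and the `KUBE_INGRESS_BASE_DOMAIN` and `POSTGRES_ENABLED` variables (the domain below is just the example base domain used earlier):

```yaml
# .gitlab-ci.yml — equivalent in spirit to enabling "Default to Auto DevOps pipeline"
include:
  - template: Auto-DevOps.gitlab-ci.yml

variables:
  # Override the Auto DevOps base domain for this project if needed
  KUBE_INGRESS_BASE_DOMAIN: eksdemo-project.gitlabtechevangelism.net
  # The Minimal Ruby App has no database, so skip the bundled PostgreSQL
  POSTGRES_ENABLED: "false"
```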
I changed the \"Hello World\" text in the previous deployment to an HTML text in a new Git branch called `amazon-eks-html` using the GitLab WebIDE tool, and committed the changes.\n\n  ![Make changes to application](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-commit.png)\n  Making new changes to application.\n  {: .note.text-center}\n\nWhile committing the changes, I selected \"start a new merge request (MR),\" which took me to the MR page where I added more information about the changes in a new MR.\n\n  ![New Merge request](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-mr.png)\n  The MR to deploy the new application.\n  {: .note.text-center}\n\nIn the image above, you can see a pipeline is created to build, test and deploy using [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) to allow you review the changes before deploying to production.\n\n  ![New MR pipeline test](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-mr-test.png)\n  MR with Review Apps\n  {: .note.text-center}\n\nOnce the review is finished, the application is deployed to a dedicated namespace in the Amazon EKS cluster for you to review before deploying to production. A URL for the [Review App](https://docs.gitlab.com/ee/ci/review_apps/) is provided, as shown in the image below.\n\n  ![Review Applications](https://about.gitlab.com/images/blogimages/deploying-application-eks/review-apps.png){: .shadow.medium.center}\n  The application in the Review App.\n  {: .note.text-center}\n\nThe `stop_review` job cleans up the Review App once the review is done. If MR approvals are required, the MR must be approved before being merged into the master branch. Once merged to master, the project is built, tested, and deployed to production.\n\n  ![Merged Change MR](https://about.gitlab.com/images/blogimages/deploying-application-eks/merged-mr.png){: .shadow.medium.center}\n  Deploying changes to production.\n  {: .note.text-center}\n\nThe image above shows that a second pipeline ran after the MR was merged. Once completed, a button is provided to `view app` and also see memory consumption as the app runs. The `view app`\"` button will open the application on the project's subdomain.\n\n  ![Updated application](https://about.gitlab.com/images/blogimages/deploying-application-eks/updated-site.png)\n  Changes deployed to production.\n  {: .note.text-center}\n\n## Deploy to Amazon EKS with Auto DevOps\n\nThe Auto DevOps function at GitLab makes deploying an application to the Amazon EKS cluster quite simple. Really, all you need to do is push code, and Auto DevOps automatically detects the programming language and uses the necessary [buildpack](https://buildpacks.io/) to test, build, and deploy your application. 
GitLab also takes making changes to your application a step further using Review Apps, which deploys your app to a temporary environment for you to review the app before deploying to production.\n\nIf you have questions about how to integrate GitLab with Amazon EKS to create a Kubernetes cluster, revisit the [first blog post](/blog/gitlab-eks-integration-how-to/).\n",[720,869,9,870],"features","tutorial",{"slug":872,"featured":6,"template":699},"deploying-application-eks","content:en-us:blog:deploying-application-eks.yml","Deploying Application Eks","en-us/blog/deploying-application-eks.yml","en-us/blog/deploying-application-eks",{"_path":878,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":879,"content":885,"config":894,"_id":896,"_type":13,"title":897,"_source":15,"_file":898,"_stem":899,"_extension":18},"/en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow",{"title":880,"description":881,"ogTitle":880,"ogDescription":881,"noIndex":6,"ogImage":882,"ogUrl":883,"ogSiteName":685,"ogType":686,"canonicalUrls":883,"schema":884},"DevSecOps platforms help SMBs scale as they grow","Adopting a comprehensive platform early lets smaller businesses mature with best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668641/Blog/Hero%20Images/smbscale.jpg","https://about.gitlab.com/blog/devsecops-platforms-help-smbs-scale-as-they-grow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps platforms help SMBs scale as they grow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-01-17\",\n      }",{"title":880,"description":881,"authors":886,"heroImage":882,"date":888,"body":889,"category":890,"tags":891},[887],"Sharon Gaudin","2023-01-17","\nFor startups and small to medium-sized businesses (SMBs) working to expand their customer base, revenue, and standing in their industries, adopting a [DevSecOps](/topics/devsecops/) platform is one move that can help make all of that growth happen. \n\nThe trick is to migrate to a single, end-to-end platform when the organization is small, so bad habits are avoided early on and constructive processes can be built in and scale as the business grows. A DevSecOps platform enables small businesses to set up an environment and work processes that help them avoid common pitfalls that can come with growth.\n\n## How DevSecOps platforms help SMBs scale\n\nHere are a few ways a DevSecOps platform can help smaller businesses and startups scale:\n\n### Reducing complexity\n\nWhen someone is on a small IT team, the last thing they need is something complicating their job and taking up their precious time. And if they are stitching together multiple tools, they end up creating a [clumsy, ad-hoc toolchain](/blog/battling-toolchain-technical-debt/). That by its very nature forces DevOps professionals to wrestle with a chaotic environment that leads to bottlenecks and requires constant management, tweaking, updating, and switching between interfaces. All of that toolchain care and feeding comes at the expense of simply focusing on delivering code that drives the organization’s bottom line. \n\n### Avoiding silos\n\nMaybe a company is small enough that silos aren’t a problem... right now. But as the business grows, silos likely will grow along with it, causing problems. 
Silos mean people are heads down working on their own project, or even worse, their own part of a project, without any visibility into the rest of it, or the ability to comment or share their work. It’s easy to create silos if you’re not using a DevSecOps platform because people often naturally separate off into single-minded groups that do not communicate with or understand each other. DevSecOps platforms foster collaboration, making it easier to keep silos from forming in the first place. They create a working environment open to communication and collaboration. A platform will give people the ability to work together, and that collective effort will produce better software. \n\n### Increasing collaboration\n\nAdopting a single, end-to-end platform when a company is small or when a startup is just getting off the ground will enable and encourage everyone in the business (from IT to finance, marketing, and sales) to work together. And it’s easier to create [a collaborative culture](/blog/why-devops-collaboration-continues-to-be-important/) from the very beginning, when working together can become a habit – a normal means of operation. Instilling an environment of communication also is less disruptive and easier to manage in a company of 10, 25, or even 100 employees than in a much larger and complex business. Collaboration also will encourage innovation by bringing in ideas from people in a range of demographics and business interests. Innovative ideas will help businesses grow into more successful and larger companies.\n\n### Decreasing hands-on work\n\nBecause startups and SMBs have fewer IT people, let alone teams of DevOps professionals, the [automation](/blog/how-automation-is-making-devops-pros-jobs-easier/) that is an integral part of a DevSecOps platform eases their burden by decreasing the amount of hands-on work they have to do. With automation for jobs like backup, installation, and security testing built in, people spend less of their already-limited time needlessly repeating time-consuming tasks, or going back in the software lifecycle to find where a security bug was introduced. Automating tasks required for everything from design to build, test, and deployment also can reduce the potential for human error and provide consistency throughout the software lifecycle. By taking those jobs off DevSecOps teams' plates, they have more time to actually build and deploy innovative software and support the business. \n\nLet’s be clear: A startup or SMB isn’t too small for a DevSecOps platform. If an organization is building software, it needs a platform. Business executives don’t want to struggle to grow and look back regretfully and think, “Why didn’t I adopt a DevSecOps platform earlier?”\n\n“If you’re on a small team or even just a team of one, migrating could seem like a lot to take on,” says [Fatima Sarah Khalid](/company/team/#sugaroverflow), a developer evangelist at GitLab. “But it’s worth the effort to set yourself up for growth. With a platform, everyone in the company is able to work in the same environment on the same projects. That means a collaborative environment without silos is formed early and the business can grow with that culture, instead of trying to adopt it years down the road when bad work habits have already formed.”\n\nWith GitLab’s single, end-to-end DevSecOps platform, automation is a system feature and not something that has to be added in. 
It also helps organizations eliminate or even keep silos from forming, increases collaboration and communication, and decreases the complexities that are born of DIY toolchains.\n\n**Download our [ebook](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform-smb.html)** to learn about the benefits of migrating from a toolchain to GitLab’s DevSecOps platform. \n\n_Cover image by [Markus Spiske](https://unsplash.com/de/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)_\n","devsecops",[743,892,9,893],"DevOps platform","growth",{"slug":895,"featured":6,"template":699},"devsecops-platforms-help-smbs-scale-as-they-grow","content:en-us:blog:devsecops-platforms-help-smbs-scale-as-they-grow.yml","Devsecops Platforms Help Smbs Scale As They Grow","en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow.yml","en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow",{"_path":901,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":902,"content":908,"config":913,"_id":915,"_type":13,"title":916,"_source":15,"_file":917,"_stem":918,"_extension":18},"/en-us/blog/enables-rapid-innovation",{"title":903,"description":904,"ogTitle":903,"ogDescription":904,"noIndex":6,"ogImage":905,"ogUrl":906,"ogSiteName":685,"ogType":686,"canonicalUrls":906,"schema":907},"GitLab uniquely enables rapid innovation","Learn about some of the ways GitLab can uniquely enable your developers to innovate more rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681603/Blog/Hero%20Images/rapids-cover-1275x750.jpg","https://about.gitlab.com/blog/enables-rapid-innovation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab uniquely enables rapid innovation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":903,"description":904,"authors":909,"heroImage":905,"date":910,"body":911,"category":693,"tags":912},[738],"2020-09-30","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nA challenge that organizations often face is the amount of time spent maintaining their IT systems vs. the time spent innovating and developing differentiating features for customers.  This challenge has become even more difficult during a global pandemic where working from home makes it harder to engage with your customers in person and digital channels have become the primary vehicle to do business with consumers of your services and products. Rapid innovation means your organization and teams can deliver lovable features faster and get value into the hands of customers sooner. This is more urgent than ever before to remain competitive and ultimately survive in this new business reality, and requires your developers to spend more time creating and developing code rather than managing multiple disparate tools, environments, and processes.\n\nGitLab uniquely enables rapid innovation by simplifying the adoption of DevOps practices so that your developers can spend more time creating innovative features and applications that matter to your customers. 
\n\nWatch this video (~6 mins) to see these rapid innovation capabilities in action.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/MLrqJ1sxkjQ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nThe following is a non-exhaustive list of ways that GitLab helps your teams to achieve rapid innovation.\n\n### Easy collaboration across multiple roles and asset types\n\nApplication creators and stakeholders within every organization come from many disciplines, often times each using their own file types to get work done. For example:\n* **Product Designers** typically work with the output of their design tools, which could be Figma or Sketch files, images, or graphs.\n* **Developers** mainly work with programming language source files (code).\n* **DevOps Engineers** might use Infrastructure-as-code files, like Terraform, CloudFormation, or Azure Resource Manager files\n* **Database Administrators** often use Data Definition Language (DDL), Data Manipulation Language (DML), and SQL scripts.\n\nWhereas other CI/CD solutions typically stick to one type of asset, with GitLab, stakeholders can easily collaborate and contribute using their preferred asset types as part of a single conversation across the whole software development lifecycle. Not only does this enrich the conversation between all stakeholders, but it speeds up the innovation process by lowering the barrier for cross team collaboration.\n\n![issue with design picture](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/issue-with-design-picture.png){: .shadow.medium.center.wrap-text}\n\n### Security and compliance\n\nSecurity and compliance is usually a top priority for CIOs and directly affects how code is developed throughout the end-to-end SDLC. It's critical to protect your IP and equally important for customers to take confidence in the fact that their sensitive data is safe and secure. Instead of putting together your own mechanisms to check security vulnerabilities, license compliance, dependency scanning, static and dynamic application security testing, performance, fuzz testing, among others, GitLab provides you with built-in templates to do all these from within your CI pipeline. All you have to do is include them in your pipeline and voila! By leveraging these templates you can more quickly focus on creating and innovating.\n\n![build and test pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/build-and-test-pipeline.png){: .shadow.medium.center.wrap-text}\n\n### Review Apps\n\nWouldn’t it be great if you could effortlessly enable all stakeholders to review the application changes BEFORE they are merged to the main branch? Instead of orchestrating and putting together a review environment and building, loading and executing the application to it for every update, you can leverage GitLab Review Apps capability, which streamlines the review process by automatically creating (and cleaning up) temporary review environments with every change. 
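Under the hood, a Review App is ordinary GitLab CI: a deploy job that declares a dynamic environment plus a companion job that stops it. The following is a minimal sketch of that pattern, not the Auto DevOps implementation itself; the `deploy.sh` and `teardown.sh` scripts and the URL scheme are placeholders you would replace with your own deployment commands:

```yaml
deploy_review:
  stage: deploy
  script:
    - ./deploy.sh "review-$CI_COMMIT_REF_SLUG"     # placeholder deploy command
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    url: https://$CI_COMMIT_REF_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review
  rules:
    - if: $CI_MERGE_REQUEST_IID

stop_review:
  stage: deploy
  script:
    - ./teardown.sh "review-$CI_COMMIT_REF_SLUG"   # placeholder cleanup command
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    action: stop
  rules:
    - if: $CI_MERGE_REQUEST_IID
      when: manual
```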
This let's developers focus on innovation instead of environment setup.\n\n![review pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/review-pipeline.png){: .shadow.medium.center.wrap-text}\n\n### Deep Kubernetes integration\n\nAnother way that GitLab uniquely enables you to innovate more rapidly is the deep integration to Kubernetes clusters, which not only includes the automatic creation of and deployment to K8s clusters, but also includes automatic cluster monitoring, per application metrics, and the one-click deployment and management of a variety of supplemental applications such as a Web Application Firewall, Cert-Manager, Prometheus, GitLab Runner, Crossplane, JupyterHub, Elastic Stack, Fluentd, Knative, and GitLab Container Network Policies.\n\nKubernetes clusters can be set up by developers at their project level or by admins at the group levels, enabling developers to take advantage of container-based development best practices without needing deep subject matter expertise. This allows developers to spend more of their time working on what matters: creating great product.\n\n![K8s apps](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/K8s-apps.png){: .shadow.medium.center.wrap-text}\n\n### Automatic environments management\n\nGitLab will automatically spin up and tear down environments as needed by the CI/CD pipeline. For example, GitLab automatically spins up pods for the review, staging and production environments. All this infrastructure automation removes the burden of having to manage infrastructure off of your shoulders so that you can spend more time developing and creating code faster.\n\n![environments](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/environments.png){: .shadow.medium.center.wrap-text}\n\n### Pipeline template creation\n\nOnce you create a pipeline based on the best practices for your organization, you can turn it into a pipeline template that your development teams can use. Other developers can reuse this new template in their projects so that they can get right to creating and innovating differentiating features and applications that matter to their consumers.\n\n![steps to create pipeline template](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/steps-create-pipeline-template.png){: .shadow.medium.center.wrap-text}\n\n### Auto DevOps\n\nIf you’d like to leverage a complete DevOps predefined CI/CD pipeline, which is based on best practices, why not use Auto DevOps? Auto DevOps allows you to automatically detect, build, test, deploy, and monitor your applications. Leveraging CI/CD best practices and tools, Auto DevOps aims to simplify the setup and execution of a mature and modern software development lifecycle. The Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process. The pipeline then deploys the application to staging for verification and then to production in an incremental fashion. 
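Both of those behaviors are switched on through documented Auto DevOps variables rather than hand-written jobs. A minimal sketch, assuming the `STAGING_ENABLED` and `INCREMENTAL_ROLLOUT_MODE` variables described in the Auto DevOps customization docs:

```yaml
include:
  - template: Auto-DevOps.gitlab-ci.yml

variables:
  # Insert a staging deployment in front of production
  STAGING_ENABLED: "1"
  # Roll production out in manually triggered increments instead of all at once
  INCREMENTAL_ROLLOUT_MODE: manual
```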
As you can see, Auto DevOps saves you from implementing your own pipeline so that you can spend more time innovating.\n\n![partial auto devops pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/partial-Auto-DevOps-pipeline.png){: .shadow.medium.center.wrap-text}\n\nThese are some of the ways GitLab uniquely enables you to innovate more rapidly by ensuring that everything is where you need it when you need it, empowering you to focus on creating and developing innovations, delivering solutions faster, putting new products and services more quickly in the hands of your customers and remaining competitive. And all within a single application.\n\nFor more videos and demos visit [Learn@GitLab](https://about.gitlab.com/learn/).\nTo learn more about how GitLab can help you innovate more rapidly visit [the GitLab website](https://about.gitlab.com)\n\nCover image by [Florian Bernhardt](https://unsplash.com/@floww?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/rapids?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[108,743,9],{"slug":914,"featured":6,"template":699},"enables-rapid-innovation","content:en-us:blog:enables-rapid-innovation.yml","Enables Rapid Innovation","en-us/blog/enables-rapid-innovation.yml","en-us/blog/enables-rapid-innovation",{"_path":920,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":921,"content":927,"config":933,"_id":935,"_type":13,"title":936,"_source":15,"_file":937,"_stem":938,"_extension":18},"/en-us/blog/fluentd-using-gitlab-ci-cd",{"title":922,"description":923,"ogTitle":922,"ogDescription":923,"noIndex":6,"ogImage":924,"ogUrl":925,"ogSiteName":685,"ogType":686,"canonicalUrls":925,"schema":926},"Thanks Fluentd for betting on GitLab CI/CD!","We're happy to support fresh CNCF graduate Fluentd with GitLab CI/CD, and excited about their latest innovation offering stream processing on the edge.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678614/Blog/Hero%20Images/gitlab-fluentd.png","https://about.gitlab.com/blog/fluentd-using-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Thanks Fluentd for betting on GitLab CI/CD!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-05-21\",\n      }",{"title":922,"description":923,"authors":928,"heroImage":924,"date":929,"body":930,"category":845,"tags":931},[714],"2019-05-21","\nFluentd, the [latest project to graduate](https://www.fluentd.org/blog/fluentd-cncf-graduation) in the CNCF, announced on stage at KubeCon Barcelona today that it is using [GitLab CI/CD](/solutions/continuous-integration/) for continuous integration. We are thrilled about the shout out and honored to support such an influential and innovative project.\n\nFor those who haven’t yet worked with Fluentd, it is an [open source data collector](https://www.fluentd.org/architecture), which lets you unify the data collection and consumption for a better use and understanding of data. Fluent Bit is their lighter-weight forwarder for those with exacting memory requirements. The project sports 7,868 stars on GitHub and their community has contributed more than 900 contributed plugins. 
They witness more than 100K downloads a day!\n\nThe latest innovation from Fluentd around [stream processing on the edge](https://docs.fluentbit.io/stream-processing/) can be very useful for our industry. As many of those who monitor large-scale, complex, distributed systems, run IoT businesses, or build smart cities will attest, more and more data is generated by these systems and analysis often needs to happen blazingly fast to be meaningful. The standard data analysis model, where it is first stored and indexed in a database (presumably in some cloud) and then analyzed, is not good enough for some real-time and complex analysis needs. The latencies associated with such data transfer may not be able to support applications involving time-critical, data-driven decision making. With Fluent bit, the Fluent team is looking to process the data while it's still in motion in the Log processor – bringing a lot of advantages of speed.\n\nWhile I am reading papers by others attempting to build stream processing on the edge, I find Fluentd’s efforts exciting because they already have major community traction and are part of companies’ observability workflows for logging. The [CNCF graduation criteria](https://github.com/cncf/toc/blob/master/process/graduation_criteria.adoc) that Fluentd met will further embolden enterprises to try it out, as part of the requirements are a diverse contributor community and security audits.\n\nWe've spent the past few months collaborating with Fluentd on their CI needs, and it's been very educational for us. We learned about the unique challenges that fast-moving projects in the CNCF face, and how we can be of assistance with our CI/CD offering. A large part of the answer is providing clear and consistent guidance around converting pipelines and then supporting the projects to success. 
If you are a CNCF project interested in working with GitLab CI/CD, holler at us and we’d be delighted to help.\n\nUntil then, enjoy KubeCon Barca!\n",[108,721,9,932,278,720],"cloud native",{"slug":934,"featured":6,"template":699},"fluentd-using-gitlab-ci-cd","content:en-us:blog:fluentd-using-gitlab-ci-cd.yml","Fluentd Using Gitlab Ci Cd","en-us/blog/fluentd-using-gitlab-ci-cd.yml","en-us/blog/fluentd-using-gitlab-ci-cd",{"_path":940,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":941,"content":947,"config":955,"_id":957,"_type":13,"title":958,"_source":15,"_file":959,"_stem":960,"_extension":18},"/en-us/blog/getting-started-gitlab-ci-gcp",{"title":942,"description":943,"ogTitle":942,"ogDescription":943,"noIndex":6,"ogImage":944,"ogUrl":945,"ogSiteName":685,"ogType":686,"canonicalUrls":945,"schema":946},"Getting started with GitLab CI/CD and Google Cloud Platform","Discover how easy it is to set up CI/CD and Kubernetes deployment with our integration with Google Kubernetes Engine.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671280/Blog/Hero%20Images/gitlab-gke-integration-cover.png","https://about.gitlab.com/blog/getting-started-gitlab-ci-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab CI/CD and Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-04-24\",\n      }",{"title":942,"description":943,"authors":948,"heroImage":944,"date":950,"body":951,"category":762,"tags":952},[949],"GitLab","2018-04-24","\n\nEarlier this month [we announced our new native integration with Google Kubernetes Engine (GKE)](/blog/gke-gitlab-integration/),\nallowing you to [set up CI/CD](/topics/ci-cd/) and Kubernetes deployment in just a few clicks. If you're new to\nGitLab CI on Google Cloud Platform (GCP), we've put together a quick [demo](#demo) and [instructions](#instructions) you can view below. For a more detailed walkthrough and the chance to ask questions, join us on April 26 for a [live demo](#join-google-and-gitlab-for-a-live-demo).\n\n## Demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/u3jFf3tTtMk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Instructions\n\n### Add a Kubernetes Engine cluster\n\nHead on over to the CI/CD -> Kubernetes menu option in the GitLab UI. Here you can add your existing cluster to your project or create a brand new one.\n\n![Add your Kubernetes cluster](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step1.png){: .shadow.center.medium}\n\nOnce connected, you can install applications like [Helm Tiller](https://helm.sh/), [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), [Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/), and [GitLab Runner](https://docs.gitlab.com/ee/ci/runners/) to your cluster with just one click.\n\n![Install applications](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/install-applications.png){: .shadow.center.medium}\n\n### Enable Auto DevOps\n\nWe've also worked with Google to integrate [GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) with GKE. 
Using them together, you'll have a continuous deployment pipeline that automatically creates a [review app](https://docs.gitlab.com/ee/ci/review_apps/) for each merge request and once you merge, deploys the application into production on production-ready GKE.\n\nTo get started, go to CI/CD -> General pipeline settings, and select “Enable Auto DevOps.” For more information, read the [Auto DevOps docs](https://docs.gitlab.com/ee/topics/autodevops/).\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step2.png){: .shadow.center.medium}\n\nAuto DevOps takes the manual work out of CI/CD by automatically detecting what languages you’re using, and configuring a continuous integration and continuous deployment pipeline that results in your app running live on the Kubernetes Engine cluster.\n\n![Review pipeline](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step3.png){: .shadow.center.medium}\n\nNow, whenever you create a merge request, we'll run a review pipeline to deploy a review app to your cluster where you can preview your changes. When you merge the code, GitLab will run a production pipeline to deploy your app to production, running on Kubernetes Engine!\n\n## Get $500 credit for your project\n\nEvery new Google Cloud Platform account receives $300 in credit [upon signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, we're offering an additional $200 for both new and existing GCP accounts to get started with the GKE integration. Here's a link to [apply for your $200 credit](https://goo.gl/AaJzRW).\n\n## Join Google and GitLab for a live demo\n\nJoin Google’s [William Denniss](https://www.linkedin.com/in/williamdenniss/) and GitLab’s [William Chia](https://www.linkedin.com/in/williamchia/) for a walkthrough of the integration on April 26. 
You’ll learn how easy it is to set up a Kubernetes cluster, how to deploy your app using GitLab CI/CD, and how GKE enables you to deploy, update, and manage containerized applications at scale.\n\n[Register today](/webcast/scalable-app-deploy/)!\n",[953,954,720,9],"google","GKE",{"slug":956,"featured":6,"template":699},"getting-started-gitlab-ci-gcp","content:en-us:blog:getting-started-gitlab-ci-gcp.yml","Getting Started Gitlab Ci Gcp","en-us/blog/getting-started-gitlab-ci-gcp.yml","en-us/blog/getting-started-gitlab-ci-gcp",{"_path":962,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":963,"content":969,"config":976,"_id":978,"_type":13,"title":979,"_source":15,"_file":980,"_stem":981,"_extension":18},"/en-us/blog/gitlab-apis-ci",{"title":964,"description":965,"ogTitle":964,"ogDescription":965,"noIndex":6,"ogImage":966,"ogUrl":967,"ogSiteName":685,"ogType":686,"canonicalUrls":967,"schema":968},"Using Gitlab APIs: Real Use Case Scenario","Learn about how GitLab CI and APIs can help you automate bulk tasks","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681037/Blog/Hero%20Images/gitlabapi-cover.jpg","https://about.gitlab.com/blog/gitlab-apis-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using Gitlab APIs: Real Use Case Scenario\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2020-01-22\",\n      }",{"title":964,"description":965,"authors":970,"heroImage":966,"date":972,"body":973,"category":693,"tags":974},[971],"William Arias","2020-01-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitlab APIs along with  Continuous Integration can be very helpful when executing certain bulk tasks.\n\nConsider this requirement derived from a real-world scenario\n\n* Company XYZ possess several repositories that have been organized under a Gitlab group\n\n![group](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/gitlab-group.png){: .shadow.medium.center.wrap-text}\n\n* The company needs to test the building of projects in bulk using new  hardware (Runner with different CPU Architecture) that will bring down  execution costs, whenever the build in each of the projects fails an issue must be  automatically created.\n\n![runner](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/runner.png){: .shadow.medium.center.wrap-text}\n\n* Lastly, all the issues that were automatically created whenever a project built failed,  should be collected in bulk and reported back to a Wiki\n\n![pipelineview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/3-pipelineview-collect-issues.png){: .shadow.medium.center.wrap-text}\n\nHow do we test the building of those several projects and create issues and reports about its execution automatically? Let's use Gitlab CI and  APIs.\n\n\n## 1. Company groups and projects Structure\n\nIn this case, the set of projects were grouped under a single group, following this structure:\n\n![groupview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/4-group-view-api-blog.png){: .shadow.medium.center.wrap-text}\n\n## 2. 
Automatically creating Issues leveraging Gitlab CI and API\n\nIn order to create issues using Gitlab API we will use the Issues API an example of that  can use the following cURL command:\n\n![curl](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-create-issue-api-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nThe API Call: \n\n `curl --request POST --header \"PRIVATE-TOKEN:$ISSUE_API_KEY\" \"https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/issues?title=Build%20Failed&labels=ARMbuild&description=Project%20Tests%20Failed%20on%20ARM\"`\n\n The previous Gitlab API call can be configured to be executed whenever a job fails. Let's dissect this API Call to understand its parameters so you can potentially customize it  for your project environment\n\n* Base URL:  https://gitlab.com/api/v4/projects\n* Project where we want to add the issue:  $CI_PROJECT_ID Notice this ID is unique and corresponds to the project where the CI/CD pipeline runs \n* Issues: Endpoint we use to tell Gitlab we want to add an issue to the project\n* Parameters:\n  * Title: How we want the issue to be titled\n  * Labels: Helpful to group issues by label or type, They help you organize and tag your work so you can track and find the work items you’re interested in.\n  * Description: Field to explain the nature of the issue if needed\n\n The request is of type POST, because we are sending data to our receiver service.  For this call to be successful it requires  authentication for which we will use *PRIVATE-TOKEN* header\n\n The private token can be generated by following these steps [How-to-generate-token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\n\nWhen we execute the above API call, we create an issue in the corresponding Gitlab project\n![issueproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-issues-created.png){: .shadow.medium.center.wrap-text}\n\nGreat, so once the multi-project pipeline has run,  each of the projects that failed in its building stage will create an issue warning us to double check why it failed while documenting the failure and labeling it for future follow-up.\n![multiproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7.1-multiproject-pipeline-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\n## 3. Automatically collecting all the issues from Gitlab Group\n\nThanks to Gitlab CI and APIs we can collect all the issues created and report them back, by adding this script  in  your pipeline stage\n\n![collectissues](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7-collecting-issues-apiblog.png){: .shadow.medium.center.wrap-text}\n\nLet's dissect again the main API call:\n\n`curl --header \"PRIVATE-TOKEN:$GROUP_ISSUE_LIST\" \"https://gitlab.com/api/v4/groups/9123625/issues`\n\n* Base url: https://gitlab.com/api/v4/\n* Group resource: /groups/9123625\n* Issues resources: /issues \n\nThe previous API call will return a json object, the one we will save as an artifact when executing our pipeline job. Notice this artifact is created and saved automatically by Gitlab CI\nGreat! So far we created issues per failed project, and collected them all in one single step\n\n\n## 4. 
Reporting back to Wiki Project \n\n![wikijob](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/8-reportwiki-gitlab-api.png){: .shadow.medium.center.wrap-text}\n\nFor convenience, the json report was transformed to markdown, then using the following script we publish the markdown report to the Wiki of an specific project\n\n`curl --data \"format=markdown&title=$CI_JOB_ID&content=$results\" --header \"PRIVATE-TOKEN:$API_WIKI\" \"https://gitlab.com/api/v4/projects/20852684/wikis\"`\n\nLet's breakdown again the API call:\n\n* Base url: https://gitlab.com/api/v4/\n* Project resource ID : /projects/20852684\n* Wiki resource: /wiki\n* Parameters: \n  * Data format: markdown. We want to publish a markdown table\n  * Title: Title of the Wiki entry, we use the environment variable corresponding to the CI_JOB that was executed\n  * Content: The markdown table generated with the issues collection\n\n Finally, when the last API call has been executed, this is an example of the output we can get: \n\n ![report](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/10-test-report-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nLet's recapitulate, by using Gitlab CI in a multi project pipeline along with APIs we were able to test and report automatically x-number of projects and its compatibility with a new hardware CPU architecture. More information about the APIs utilized for this project here:\n\n[Issues-api](https://docs.gitlab.com/ee/api/issues.html#new-issue)\n[Collect-group-issues](https://docs.gitlab.com/ee/api/issues.html#list-group-issues)\n[WikisAPI](https://docs.gitlab.com/ee/api/wikis.html)\n\n[Multi-project-pipeline](https://about.gitlab.com/blog/cross-project-pipeline/)\n\n\nIf you’d like to see GitLab’s API in action, watch this [video](https://youtu.be/zdBwMHARkU0?t=469).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\nCover image credit:\n\nCover image by [Mohanan](https://unsplash.com/photos/yQpAaMsQzYE) on [Unsplash](https://unsplash.com)\n{: .note}\n\n",[718,975,743,9],"AWS",{"slug":977,"featured":6,"template":699},"gitlab-apis-ci","content:en-us:blog:gitlab-apis-ci.yml","Gitlab Apis Ci","en-us/blog/gitlab-apis-ci.yml","en-us/blog/gitlab-apis-ci",{"_path":983,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":984,"content":990,"config":996,"_id":998,"_type":13,"title":999,"_source":15,"_file":1000,"_stem":1001,"_extension":18},"/en-us/blog/gitlab-arm-aws-graviton2-solution",{"title":985,"description":986,"ogTitle":985,"ogDescription":986,"noIndex":6,"ogImage":987,"ogUrl":988,"ogSiteName":685,"ogType":686,"canonicalUrls":988,"schema":989},"Announcing 32/64-bit Arm Runner Support for AWS Graviton2","GitLab enables CI/CD solution on Arm-based AWS Graviton2 instances.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/gitlab-arm-aws-graviton2-solution","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing 32/64-bit Arm Runner Support for AWS Graviton2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kushal Koolwal\"}],\n        \"datePublished\": \"2020-05-15\",\n      }",{"title":985,"description":986,"authors":991,"heroImage":987,"date":993,"body":994,"category":719,"tags":995},[992],"Kushal Koolwal","2020-05-15","\n\n_Kushal Koolwal is senior manager, Software Ecosystem Development at Arm Inc._\n\nAt Arm TechCon 2019, GitLab and Arm 
[announced](/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm/) a joint partnership with the goal of providing first class citizen support for Arm architecture starting with [GitLab’s CI/CD tool](/topics/ci-cd/).\n\n\"Arm is on a mission to make cloud-native developers’ experience frictionless by building out the software stack and enabling a complete set of developer tools,\" says [Pete Goldberg](/company/team/#pete_goldberg), director of Partnerships, GitLab. \"Amazon Web Services (AWS) is the first major cloud provider to build and deploy Arm-powered compute instances. GitLab is proud to be Arm’s CI/CD solution, enabling DevOps to seamlessly certify new and existing applications in production environments hosted on AWS Graviton2.\"\n\n### GitLab and Arm announcement and partnership enhancements\n\nToday, the partnership achieved another major milestone in its partnership efforts with the delivery of official support for 32-bit and 64-bit Arm-based GitLab runners in binary, rpm/deb packaging, and Docker image format.\n\n#### This milestone highlights the following enhancements:\n\n\n*   Arm [Runner binaries](https://gitlab-runner-downloads.s3.amazonaws.com/latest/index.html) were made available in the 12.6 release as part of the Graviton2 launch at [AWS re:Invent 2019](/blog/updates-from-aws-reinvent/), allowing developers to start immediately in their custom environments.\n*   [RPM/DEB packages](https://packages.gitlab.com/runner/gitlab-runner) for easier install/upgrade in 12.9 release.\n*   Native Arm [Docker image](https://hub.docker.com/r/gitlab/gitlab-runner/tags) in 13.0 release for container-based environments.\n\n#### As a testament to the strength of the partnership, GitLab has:\n\n*   [Released a demo showing how to deploy and AWS Graviton2 M6g Instance](https://youtu.be/0dntra12w6w)\n*   [Joined](https://developer.arm.com/solutions/infrastructure/developer-resources/ci-cd/gitlab) the [Arm Neoverse developer program ](https://developer.arm.com/solutions/infrastructure/developer-resources/ci-cd/gitlab)\n*   Adding support for Arm architectures for [Auto DevOps](https://gitlab.com/gitlab-org/gitlab/-/issues/214552) and [Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/1625)\n\nLearn more about EC2 M6g Instances, powered by AWS Graviton2, [here](https://aws.amazon.com/blogs/aws/new-m6g-ec2-instances-powered-by-arm-based-aws-graviton2/).\n",[232,108,9],{"slug":997,"featured":6,"template":699},"gitlab-arm-aws-graviton2-solution","content:en-us:blog:gitlab-arm-aws-graviton2-solution.yml","Gitlab Arm Aws Graviton2 Solution","en-us/blog/gitlab-arm-aws-graviton2-solution.yml","en-us/blog/gitlab-arm-aws-graviton2-solution",{"_path":1003,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1004,"content":1009,"config":1014,"_id":1016,"_type":13,"title":1017,"_source":15,"_file":1018,"_stem":1019,"_extension":18},"/en-us/blog/gitlab-eks-integration-how-to",{"title":1005,"description":1006,"ogTitle":1005,"ogDescription":1006,"noIndex":6,"ogImage":859,"ogUrl":1007,"ogSiteName":685,"ogType":686,"canonicalUrls":1007,"schema":1008},"How to create a Kubernetes cluster on Amazon EKS in GitLab","A Kubernetes tutorial: Create clusters in a few clicks with GitLab and Amazon EKS.","https://about.gitlab.com/blog/gitlab-eks-integration-how-to","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a Kubernetes cluster on Amazon EKS in GitLab\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2020-03-09\",\n      }",{"title":1005,"description":1006,"authors":1010,"heroImage":859,"date":1011,"body":1012,"category":762,"tags":1013},[865],"2020-03-09","Kubernetes has created a whole new world for running infrastructure at\nscale. With the right setup, an application can go from serving a few users\nto millions effortlessly. Setting up Kubernetes can be tasking and can\nrequire a lot of expertise to put all the pieces together. You’ll need to\nset up virtual or bare metal machines to use as nodes and manage SSL\ncertificates, networking, load balancers, and many other moving parts.\n\n\nThe introduction of Amazon Elastic Kubernetes Service (EKS) was widely\napplauded as it streamlines the abstraction of the complexities in an\nenvironment most organizations are already familiar with and on a provider\nthey already trust. Amazon EKS makes creating and managing Kubernetes\nclusters easier with more granular controls around security and\nstraightforward policies of how resources are used.\n\n\nGitLab strives to increase developer productivity by automating repetitive\ntasks and allowing developers to focus on business logic. We recently\nintroduced support for auto-creating Kubernetes clusters on Amazon EKS. In a\nfew clicks with the right permissions, you’ll have a fully functional\nKubernetes cluster on Amazon EKS. It doesn’t stop there however – GitLab\nalso gives you the power to achieve the following use cases and more :\n\n\n* [Highly scalable CI/CD system using GitLab\nRunner](https://docs.gitlab.com/runner/): There are times like holidays when\nlittle to no changes to code are pushed to production, so why keep resources\ntied down? With the Amazon EKS integration with GitLab, you can install\nGitLab Runner with just a click and your CI/CD will run effortlessly without\nworrying about running out of resources.\n\n* Shared Cluster: Maintaining multiple Kubernetes clusters can be a pain and\ncapital intensive. With Amazon EKS, GitLab allows you to setup a cluster at\n[Instance](https://docs.gitlab.com/ee/user/instance/clusters/index.html),\n[Group](https://docs.gitlab.com/ee/user/group/clusters/index.html) and\n[Project](https://docs.gitlab.com/ee/user/project/clusters/) levels.\nKubernetes Namespaces are created for each GitLab project when the Amazon\nEKS is integrated at Instance and Project level, allowing isolation and\nensuring security.\n\n* [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/index.html):\nReviewing changes to code or design can be tricky, you’ll need to check out\nyour branch and run the code in a test environment. GitLab integrated with\nAmazon EKS deploys your app with new changes to a dynamic environment and\nall you need to do is click on a “View App“ button to review changes.\n\n*\n[AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\ntakes DevOps to a whole new level. AutoDevOps detects, builds, tests,\ndeploys, and monitors your applications, leveraging the Amazon EKS\nintegration. All you have to do is push your code and the magic happens. 
In\nthis tutorial, we will deploy a sample application to the Amazon EKS cluster\nwe will be creating using AutoDevOps.\n\n\nTo show you how easy it is to create an Amazon EKS cluster from GitLab, the\nrest of this tutorial will walk you through the steps of the integration,\nstarting with a one-time setup of necessary resources on AWS.\n\n\n## One-time setup on AWS to access resources\n\n\nFirst, we need to create a “provision\" role and a “service” role on AWS to\ngrant GitLab access to your AWS resources and set up the necessary\npermissions to create and manage EKS clusters. You only need to perform\nthese steps once and you can reuse them anytime you want to perform another\nintegration or create more clusters.\n\n\n### Step 1 - Create Provision Role\n\n\nTo grant GitLab access to your AWS resources, a “provision role” is\nrequired. Let’s create one:\n\n\n1. Access GitLab Kubernetes Integration Page by clicking on the ”Kubernetes”\nmenu for groups and Operations > Kubernetes menu for projects and click the\n“Add Kubernetes Cluster” button.\n\n2. Select “Amazon EKS” in the options provided under the “Create new cluster\non EKS” tab.\n\n3. You are provided with an Account and External ID  to use for\nauthentication. Make note of these values to be used in a later step.\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_page.png)\n\n4. Open IAM Management Console in another tab and click on “Create Role”\n\n5. Click on the “Another AWS account” tab and provide the Account and\nExternal ID obtained from GitLab and click Next to set permissions as shown\nbelow:\n\n    ![AWS Provision Role](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/provision_role.png)\n\n6. On the permissions page, click on “Create policy.” This will open a new\ntab where you can set either of the permissions below using JSON:\n\n    ```json\n    {\n        \"Version\": \"2012-10-17\",\n        \"Statement\": [\n            {\n                \"Effect\": \"Allow\",\n                \"Action\": [\n                    \"autoscaling:*\",\n                    \"cloudformation:*\",\n                    \"ec2:*\",\n                    \"eks:*\",\n                    \"iam:*\",\n                    \"ssm:*\"\n                ],\n                \"Resource\": \"*\"\n            }\n        ]\n    }\n    ```\n\n    This gives GitLab full access to create and manage resources, as seen in the image below:\n\n    ![AWS Role Policy](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/create_role_policy.png)\n\n    If you prefer limited permission, you can give GitLab the ability to create resources, but not delete them with the JSON snippet below. The drawback here is if an error is encountered during the creation process, changes will not be rolled back and you must remove resources manually. 
You can do this by deleting the relevant CloudFormation stack.\n\n    ```json\n    {\n        \"Version\": \"2012-10-17\",\n        \"Statement\": [\n            {\n                \"Effect\": \"Allow\",\n                \"Action\": [\n                    \"autoscaling:CreateAutoScalingGroup\",\n                    \"autoscaling:DescribeAutoScalingGroups\",\n                    \"autoscaling:DescribeScalingActivities\",\n                    \"autoscaling:UpdateAutoScalingGroup\",\n                    \"autoscaling:CreateLaunchConfiguration\",\n                    \"autoscaling:DescribeLaunchConfigurations\",\n                    \"cloudformation:CreateStack\",\n                    \"cloudformation:DescribeStacks\",\n                    \"ec2:AuthorizeSecurityGroupEgress\",\n                    \"ec2:AuthorizeSecurityGroupIngress\",\n                    \"ec2:RevokeSecurityGroupEgress\",\n                    \"ec2:RevokeSecurityGroupIngress\",\n                    \"ec2:CreateSecurityGroup\",\n                    \"ec2:createTags\",\n                    \"ec2:DescribeImages\",\n                    \"ec2:DescribeKeyPairs\",\n                    \"ec2:DescribeRegions\",\n                    \"ec2:DescribeSecurityGroups\",\n                    \"ec2:DescribeSubnets\",\n                    \"ec2:DescribeVpcs\",\n                    \"eks:CreateCluster\",\n                    \"eks:DescribeCluster\",\n                    \"iam:AddRoleToInstanceProfile\",\n                    \"iam:AttachRolePolicy\",\n                    \"iam:CreateRole\",\n                    \"iam:CreateInstanceProfile\",\n                    \"iam:CreateServiceLinkedRole\",\n                    \"iam:GetRole\",\n                    \"iam:ListRoles\",\n                    \"iam:PassRole\",\n                    \"ssm:GetParameters\"\n                ],\n                \"Resource\": \"*\"\n            }\n        ]\n    }\n    ```\n\n    The image below visualizes what permissions are granted:\n\n    ![Limited Role Policy](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/limited_role_policy.png)\n\n7. Once the policy is created, return to the “Create Role” browser tab and\nrefresh to see the policy we created listed. Select the policy and click\n“Next.”\n\n8. In the Tags section, we don’t need to set any Tags, except if it’s\nrequired in your organization. Let’s proceed to Review.\n\n9. Specify a Name for your new Role. You will see the policy we created\nlisted under policies and click “Create Role” to complete the process.\n\n10. Click on the new Role you created in the list of Roles to view its\ndetails. You may have to search for it in the list of Roles if it’s not\nlisted in the first view. Copy the Role ARN provided – we will need it on\nthe GitLab Kubernetes Integration page.\n\n\n### Step 2 - Create Service Role\n\n\nThe Service Role is required to allow Amazon EKS and the Kubernetes control\nplane to manage AWS resources on your behalf.\n\n\n1. In the IAM Management Console, click on “Create Role” and select the “AWS\nservice” tab.\n\n2. Select EKS in the list of services and Use Cases as shown below and click\nNext.\n\n    ![Service Role](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/service_role.png)\n\n3. You will notice the “AmazonEKSClusterPolicy” and “AmazonEKSServicePolicy”\npermissions are selected; these are all we need. Click through the Tags step\nand create if necessary, then click Next to get to the Review step. 
Click\n“Create Role” to complete the process.\n\n    ![Role Summary](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/role_summary.png)\n\n## GitLab EKS Integration\n\n\nThis is the easy part! As mentioned earlier, you only need to create the\nProvision and Service role once if you don’t already have them in your\norganization’s AWS setup. You can reuse the roles for other integrations or\ncluster creations.\n\n\n1. Return to the GitLab Kubernetes Integration page and provide the Role ARN\nof the Provision Role we created earlier and click “Authenticate with AWS.”\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_page.png)\n\n2. Once authenticated, you’ll have a page to set the parameters needed to\nset up your cluster as shown in the image below and click on “Create\nKubernetes Cluster” to let GitLab do its magic!\n\n    The parameters you’ll need to provide are:\n    * **Kubernetes cluster name** - The name you wish to give the cluster.\n    * **Environment scope** - The [GitLab environment](https://docs.gitlab.com/ee/user/project/clusters/index.html#setting-the-environment-scope) associated with this cluster; `*` denotes the cluster will be used for deployments to all environments.\n    * **Kubernetes version** - The Kubernetes version to use. Currently, the only version supported is 1.14.\n    * **Role name** - The service role we created earlier.\n    * **Region** - The [AWS region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) in which the cluster will be created.\n    * **Key pair name** - Select the [key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) that you can use to connect to your worker nodes if required.\n    * **VPC** - Select a [VPC](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) to use for your EKS Cluster resources.\n    * **Subnets** - Choose the [subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in your VPC where your worker nodes will run.\n    * **Security group** - Choose the [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) to apply to the EKS-managed Elastic Network Interfaces that are created in your worker node subnets. AWS provides a default group, which can be used for the purpose of this guide. However, you are advised to setup up the right rules required for your resources.\n    * **Instance type** - The AWS [instance type](https://aws.amazon.com/ec2/instance-types/) of your worker nodes.\n    * **Node count** - The number of worker nodes.\n    * **GitLab-managed cluster** - Leave this checked if you want [GitLab to manage namespaces and service accounts](https://docs.gitlab.com/ee/user/project/clusters/index.html#gitlab-managed-clusters) for this cluster.\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_post_auth.png)\n\n3. The cluster creation process will take approximately 10 minutes. Once\ndone you can proceed to install some predefined applications. At the very\nleast, you need to install the following:\n    - **Helm Tiller**: This is required to install the other applications.\n    - **Ingress**: This provides SSL termination, load balancing and name-based virtual hosting you your applications. 
3. The cluster creation process will take approximately 10 minutes. Once\ndone, you can proceed to install some predefined applications. At the very\nleast, you need to install the following:\n    - **Helm Tiller**: This is required to install the other applications.\n    - **Ingress**: This provides SSL termination, load balancing and name-based virtual hosting for your applications. It acts as a web proxy for your application, which is useful when using AutoDevOps or deploying your own apps.\n    - **Cert Manager**: This is a native Kubernetes certificate management controller, which helps in issuing certificates using Let’s Encrypt. You don’t need this if you want to use a custom Certificate issuer.\n    - **Prometheus**: GitLab uses the Prometheus integration for automatic monitoring of your applications, collecting metrics from Kubernetes containers so you can understand what is going on from within the GitLab UI.\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_post_cluster.png)\n\n4. To make use of the Auto Review Apps and Auto Deploy stages of\n[AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/quick_start_guide.html),\nyou will need to specify a Base Domain name with a wildcard DNS record pointing to\nthe Ingress Endpoint generated when you install Ingress from the list of\npredefined apps.\n\n\n## Summary\n\n\nIn this tutorial, we looked at how GitLab integrates with Amazon EKS,\nallowing Kubernetes clusters to be created easily from the GitLab UI after\nsetting the right permissions. As we’ve seen, developer productivity is\ngreatly improved by no longer having to manually set up clusters. Also, the\nsame cluster can be used for multiple projects when Amazon EKS is integrated\nwith GitLab at the Group and Instance levels, thus making onboarding new\nprojects a breeze. After integration, the possibilities of what developers\ncan achieve are enormous.\n\n\nIn the next part of this tutorial, we will look at how to deploy your\napplications on an Amazon EKS cluster using AutoDevOps.\n",[720,869,9],{"slug":1015,"featured":6,"template":699},"gitlab-eks-integration-how-to","content:en-us:blog:gitlab-eks-integration-how-to.yml","Gitlab Eks Integration How To","en-us/blog/gitlab-eks-integration-how-to.yml","en-us/blog/gitlab-eks-integration-how-to",{"_path":1021,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1022,"content":1028,"config":1033,"_id":1035,"_type":13,"title":1036,"_source":15,"_file":1037,"_stem":1038,"_extension":18},"/en-us/blog/gitlab-first-deployed-kubernetes-api-to-multiple-clouds",{"title":1023,"description":1024,"ogTitle":1023,"ogDescription":1024,"noIndex":6,"ogImage":1025,"ogUrl":1026,"ogSiteName":685,"ogType":686,"canonicalUrls":1026,"schema":1027},"GitLab deploys into multiple clouds from kubectl using Crossplane","We're proud to be advancing our commitment to multicloud DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680612/Blog/Hero%20Images/crossplane.png","https://about.gitlab.com/blog/gitlab-first-deployed-kubernetes-api-to-multiple-clouds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab deploys into multiple clouds from kubectl using Crossplane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2019-05-20\",\n      }",{"title":1023,"description":1024,"authors":1029,"heroImage":1025,"date":1030,"body":1031,"category":300,"tags":1032},[949],"2019-05-20","\n\nToday the [Crossplane community](https://crossplane.io) is demonstrating how GitLab is the first real-world application [deployed across multiple clouds via Crossplane](http://blog.crossplane.io/crossplane-composes-fully-managed-services-in-kubernetes-to-deploy-gitlab-into-multiple-clouds/), the open source multicloud control plane.
When [Crossplane launched](/blog/opensource-multi-cloud-crossplane/), we were excited to learn about a project that is taking the complexity out of managing services across multiple clouds. We believe this is the last hurdle to [multicloud maturity](https://medium.com/gitlab-magazine/multi-cloud-maturity-model-2de185c01dd7).\n\nAs some of our competitors move closer to supporting one cloud, GitLab is committed to [multicloud DevOps](/topics/multicloud/). Today, our customers can already install and deploy from GitLab to any public cloud. In the future, as the Crossplane project matures, we plan to leverage Crossplane to help our customers take multicloud one step further and deploy GitLab entirely through the Kubernetes API into multiple clouds, including the use of fully-managed services offered by the respective cloud providers.\n\n## Deploy GitLab with external managed services using kubectl\n\nReal-world applications like GitLab can now be deployed entirely from kubectl into multiple clouds using Crossplane, including their external managed services. Crossplane extends the Kubernetes API by adding resource claims and resource classes to support composability of managed service dependencies in Kubernetes, similar to persistent volume claims and storage classes (see the sketch after the steps below). Crossplane is easily added to any existing Kubernetes cluster and cleanly layers on top of clusters provisioned by Anthos, EKS, AKS, and OpenShift.\n\nCluster administrators install Crossplane on a Kubernetes cluster, set cloud credentials, and specify which managed services they want to make available for self-service provisioning within the cluster. Policies guide binding to specific managed service offerings configured by the cluster administrator.\n\nApplication owners can consume and compose these managed services on-demand with the Kubernetes patterns they’re familiar with today, without having to know about the infrastructure details or having to manage credentials.\n\nThis provides an excellent separation of concerns and makes applications more portable, while retaining flexibility for cluster administrators to tailor how they want these managed services to be provisioned in their environments.\n\nFor production deployments, GitLab [recommends using external managed services](https://gitlab.com/charts/deploy-image-helm-base/blob/master/doc/installing.md) for Redis, PostgreSQL, and object storage. Crossplane supports declaring these managed services as resource claims in Kubernetes that dynamically bind to the appropriate cloud provider using resource classes configured by the cluster administrator to provide the managed service.\n\n### Deploy GitLab to multiple clouds using Crossplane with the following steps:\n\n#### Cluster Administrator:\n1. Install Crossplane on your Kubernetes cluster\n1. Set cloud provider credentials\n1. Provide managed services with resource classes\n\n#### Application Owner:\n1. Provision managed services with resource claims\n1. Bind resource claims into the exported GitLab Helm chart\n1. Deploy the GitLab application with Crossplane managed services
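\nTo make the claim/class analogy concrete, here is the equivalent pairing from core Kubernetes storage. This is plain Kubernetes, not Crossplane itself, and the names and sizes are illustrative; it is shown only to convey the pattern that Crossplane generalizes to managed services such as PostgreSQL and Redis:\n\n```yaml\n# Published by the cluster administrator: describes how storage is provisioned\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: standard\nprovisioner: kubernetes.io/gce-pd\n---\n# Created by the application owner: asks for storage without naming the provider\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: gitlab-data\nspec:\n  storageClassName: standard\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Gi\n```\n\nCrossplane applies the same split to managed services: the cluster administrator publishes resource classes bound to a cloud provider, and the application owner files a resource claim that the exported GitLab Helm chart can then reference.\n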
That's it! GitLab is now up and running in your cloud of choice, using fully-managed services for Redis, PostgreSQL, and storage buckets!\n\nPlease see the [Crossplane blog post](http://blog.crossplane.io/crossplane-composes-fully-managed-services-in-kubernetes-to-deploy-gitlab-into-multiple-clouds/) to learn more about deploying GitLab to multiple clouds with external managed service dependencies, including the full instructions so you can follow along in your own environment.\n\n## Multicloud success in the enterprise\n\nWith [81 percent of enterprises](https://www.rightscale.com/blog/cloud-industry-insights/cloud-computing-trends-2018-state-cloud-survey) already adopting a multicloud strategy, technologies like Crossplane are key to enterprise adoption success. While the ability to choose any cloud to run on is important, the practicalities of developing and deploying applications in multiple clouds are complex, driving up development costs. Crossplane introduces a set of workload resource abstractions on top of existing managed services and cloud offerings to enable workload portability across cloud providers. This allows developers to produce complex applications that can be deployed anywhere, while enabling operational teams to manage cloud infrastructure by policy and business priorities.\n\n“We’re showing a real-world example of the future of multicloud today,” said Bassam Tabbara, CEO of Upbound and maintainer on Crossplane. “GitLab is a production application that relies on multiple fully-managed services, so by abstracting these services and integrating them with the declarative Kubernetes API, we are demonstrating the ability to standardize on a single declarative API to manage it all.”\n\n## Find us at KubeCon Barcelona this week\n\nGitLab will be at KubeCon Barcelona this week, and we would love to meet you and talk about how GitLab can help you with your multicloud strategy.\n\nJoin us at the Multicloud 360 event at KubeCon on Tuesday, from 8:30 pm to midnight, alongside Upbound, Google Cloud, DigitalOcean and CockroachDB.
[RSVP here](https://www.eventbrite.com/e/multicloud-360-tickets-60623662005) to claim your spot.\n\n![multicloud 360](https://about.gitlab.com/images/blogimages/multicloud-360.jpeg)\n\nIn addition, visit GitLab at KubeCon booth S21 to learn more about GitLab and Kubernetes, and be sure to check out everything else we are involved in [here](/blog/kubernetes-kubecon-barcelona/).\n",[719,9],{"slug":1034,"featured":6,"template":699},"gitlab-first-deployed-kubernetes-api-to-multiple-clouds","content:en-us:blog:gitlab-first-deployed-kubernetes-api-to-multiple-clouds.yml","Gitlab First Deployed Kubernetes Api To Multiple Clouds","en-us/blog/gitlab-first-deployed-kubernetes-api-to-multiple-clouds.yml","en-us/blog/gitlab-first-deployed-kubernetes-api-to-multiple-clouds",{"_path":1040,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1041,"content":1047,"config":1052,"_id":1054,"_type":13,"title":1055,"_source":15,"_file":1056,"_stem":1057,"_extension":18},"/en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"title":1042,"description":1043,"ogTitle":1042,"ogDescription":1043,"noIndex":6,"ogImage":1044,"ogUrl":1045,"ogSiteName":685,"ogType":686,"canonicalUrls":1045,"schema":1046},"How to use GitLab for Agile, CI/CD, GitOps, and more","Read our example engineering stories from the past two years that show how to use GitLab for your DevOps cycle, including GitOps, CI/CD and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681825/Blog/Hero%20Images/triangle_geo.jpg","https://about.gitlab.com/blog/gitlab-for-cicd-agile-gitops-cloudnative","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab for Agile, CI/CD, GitOps, and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-12-17\",\n      }",{"title":1042,"description":1043,"authors":1048,"heroImage":1044,"date":760,"body":1050,"category":762,"tags":1051},[1049],"Sara Kassabian","\n\nOn this blog, our community frequently shares tips, tricks, stories, and tutorials that demonstrate how to do different things with GitLab. This collection features some of our most popular and enduring how-to blog posts from the past two years, covering [CI/CD](/topics/ci-cd/), GitOps, machine learning, and more! See how various team members, companies, and users leverage GitLab to deliver software faster and more efficiently by reading and watching some of the tutorials we've featured.\n\n## Code review with GitLab\n\nWe know that code review is essential to effective collaboration, but the logistics of it all can be challenging. [Master code review by watching the demo](/blog/demo-mastering-code-review-with-gitlab/) included with this blog post.\n\n## Cool ways to use GitLab CI/CD\n\n### The basics of CI/CD\n\nBrand new to CI/CD?
Read our [beginner's guide to the vocabulary and concepts](/blog/beginner-guide-ci-cd/).\n\nHere’s the [code you’ll need to build a CI/CD pipeline](/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab/) with AutoDeploy to Kubernetes, using GitLab and Helm.\n\nNext, find the [code you'll need to build a CI pipeline with GitLab](/blog/basics-of-gitlab-ci-updated/), allowing you to run jobs sequentially, in parallel, or out of order.\n\n### Pipelines with CI/CD\n\nLearn how to [build a CI/CD pipeline in 20 minutes (or less) using GitLab’s AutoDevOps](/blog/building-a-cicd-pipeline-in-20-mins/) capabilities by following the instructions in this blog post, which is based on a popular GitLab Commit Brooklyn presentation that you can watch below.\n\nDiscover [how to trigger pipelines across multiple projects](/blog/cross-project-pipeline/) using GitLab CI/CD.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/-shvwiBwFVI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### CI/CD with Android\n\nAndroid project users are in luck because in [this post we explain how to set up GitLab continuous integration (CI) functions](/blog/setting-up-gitlab-ci-for-android-projects/) in Android projects.\n\nGitLab and fastlane pair up to [help users publish applications to the iOS store](/blog/ios-publishing-with-gitlab-and-fastlane/) using a GitLab CI/CD runner.\n\n### CI/CD and GKE\n\n![GitLab CI/CD and GKE integration](https://about.gitlab.com/images/blogimages/gitlab-gke-integration-cover.png){: .shadow.medium.center}\n\nWe explain [how to get started with GitLab CI/CD and Google Kubernetes Engine (GKE)](/blog/getting-started-gitlab-ci-gcp/) in this initial demo.\n\nGitLab self-managed user? ✅\nUsing Google Kubernetes engine? ✅\nGreat! The [next tutorial is all about how to use GitLab CI to install GitLab runners on GKE](/blog/gitlab-ci-on-google-kubernetes-engine/) using our integration. It shouldn’t take you more than 15 minutes.\n\n## GitLab for machine learning\n\nBut what about GitLab for machine learning? We’ve got you covered. 
Watch the demo from GitLab Virtual Commit to see how you can use GitLab to leverage tasks for machine learning pipelines.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/DJbQJDXmjew\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab for Agile\n\nGitLab features work for many software development methodologies, including [Agile](/solutions/agile-delivery/).\n\nStart by [mapping Agile artifacts to GitLab features](/blog/gitlab-for-agile-software-development/) and explore how iteration works using GitLab.\n\n![GitLab issue board](https://about.gitlab.com/images/blogimages/issue-board.png){: .shadow.medium.left}\n\nThe GitLab issue board allows for flexible workflows and can be organized to represent [Agile software development](/topics/agile-delivery/) states.\n{: .note.text-center}\n\nThen go more in-depth to learn [how to use GitLab for Agile portfolio planning and project management](/blog/gitlab-for-agile-portfolio-planning-project-management/).\n\n## Giddy for GitOps?\n\n[GitOps](/topics/gitops/) takes DevOps best practices that are used for application development such as [version control](/topics/version-control/), collaboration, compliance, and CI/CD, and applies them to infrastructure automation.\n\nGitLab is the [DevOps platform](/topics/devops/) that does it all, and it’s built using Git, making it the ideal solution for GitOps processes.\n\nFirst, we explained [how GitLab and Ansible can be used together for GitOps](/blog/using-ansible-and-gitlab-as-infrastructure-for-code/) processes and [infrastructure as code](/topics/gitops/infrastructure-as-code/). In a follow-up post, we explain how [GitLab can also be paired with Terraform for GitOps](/topics/gitops/gitlab-enables-infrastructure-as-code/) and IaC.\n\nThe video on how to use Ansible and GitLab together has been viewed more than 13,000 times since it was first created in 2019, and is embedded for you below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/M-SgRTKSeOg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Visibility\n\nOne of our principles at GitLab is to [dogfood everything](/handbook/engineering/development/principles#dogfooding), so you can rest assured that we aren’t about to introduce an engineering feature without first trying it out for ourselves. When it comes to our Insights tool though, the process happened in reverse. Our Engineering Productivity team at GitLab needed a particular tool, and as we built it, we realized it would benefit our GitLab Ultimate customers. 
Read on to [learn how our Insights tool came to be](/blog/insights/).\n\nDig into this [valuable explanation of how we discovered that Prometheus query language can be used to detect anomalies](/blog/anomaly-detection-using-prometheus/) in the time-series data that GitLab.com reports.\n\n## In the clouds\n\nWatch the demo to learn how GitLab runner and RedHat OpenShift can work together to jump start your application development and deployment to the cloud.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/yGWiQwrWimk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd finally, although Docker Hub may be enforcing new rate limits, there's no need to panic. We [explain how to build a monitoring plug-in](/blog/docker-hub-rate-limit-monitoring/) to help you monitor the number of pull requests.\n\nCan you think of some other stand-out blog posts or demos that we should include here? Drop the link in a comment below.\n\nCover image by [Chris Robert](https://unsplash.com/@chris_robert) on [Unsplash](https://unsplash.com/photos/kY-uPDLXxHg)\n{: .note}\n",[718,742,696,9],{"slug":1053,"featured":6,"template":699},"gitlab-for-cicd-agile-gitops-cloudnative","content:en-us:blog:gitlab-for-cicd-agile-gitops-cloudnative.yml","Gitlab For Cicd Agile Gitops Cloudnative","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative.yml","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"_path":1059,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1060,"content":1066,"config":1072,"_id":1074,"_type":13,"title":1075,"_source":15,"_file":1076,"_stem":1077,"_extension":18},"/en-us/blog/gitlab-markdown-tutorial",{"title":1061,"description":1062,"ogTitle":1061,"ogDescription":1062,"noIndex":6,"ogImage":1063,"ogUrl":1064,"ogSiteName":685,"ogType":686,"canonicalUrls":1064,"schema":1065},"A 5-minute Markdown tutorial","New to GitLab? New to Markdown? Here's a quick explainer on using Markdown to format text all over GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671172/Blog/Hero%20Images/markdown-tutorial-cover.png","https://about.gitlab.com/blog/gitlab-markdown-tutorial","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A 5-minute Markdown tutorial\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-08-17\",\n      }",{"title":1061,"description":1062,"authors":1067,"heroImage":1063,"date":1069,"body":1070,"category":762,"tags":1071},[1068],"Rebecca Dodd","2018-08-17","\n\nAt GitLab, we love [Markdown](https://docs.gitlab.com/ee/user/markdown.html) for providing a simple, clean way to add styling and formatting to plain text, that's visible and repeatable across multiple applications. This means you can copy and paste the text without losing the formatting, and it makes [reviewing diffs](https://docs.gitlab.com/ee/development/merge_request_concepts/diffs/) easier, as you're still reviewing plain text with no hidden data.\n\n## What is Markdown?\n\nMarkdown is a lightweight markup language created by John Gruber in 2004. Markdown lets you add formatting elements to plaintext text documents. Since its creation, markdown has become one of the world’s most popular markup languages. There are many web-based applications specifically built for writing in Markdown. 
Markdown syntax is designed to be readable and simple.\n\n## Markdown tutorial\n\nGitLab Product Marketing Manager [William Chia](/company/team/#thewilliamchia) recorded this five-minute Markdown tutorial for another GitLab team-member, so you can see how Markdown works within GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ix416lAYRSg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab Flavored Markdown\n\nGitLab uses [GitLab Flavored Markdown](https://gitlab.com/help/user/markdown#gitlab-flavored-markdown-gfm) (GFM) for other handy functionality not supported by standard Markdown. Here are a few useful things you can do with GFM:\n\n### Reference issues, commits, merge requests, or team members\n\nWhen you type `#12` (or any number) in an issue, it will automatically create a link to the corresponding issue in that project. You can also [easily reference other GitLab-specific items](https://gitlab.com/help/user/markdown#special-gitlab-references).\n\n### Autolink URLs\n\n You don't have to use the standard `[]()` format to create a link: just pasting the URL will [autolink it](https://gitlab.com/help/user/markdown#url-auto-linking).\n\n### Create diagrams and flowcharts\n\nIn [GitLab 10.3](/releases/2017/12/22/gitlab-10-3-released/#flow-charts-sequence-diagrams-and-gantt-diagrams-in-gitlab-flavored-markdown-gfm-with-mermaid) we added the ability to [generate diagrams and flowcharts](https://gitlab.com/help/user/markdown#mermaid) using [mermaid](https://mermaidjs.github.io/).\n\n### Quick actions\n\nOpen or close issues, reassign merge requests, add todos, unsubscribe from issues – these are just a few things you can do with GFM [quick actions](https://docs.gitlab.com/ee/user/project/quick_actions.html), all without leaving your keyboard. Just type `/` and a list of options will appear.\n\nThese are just a few examples of GFM – see the [Markdown documentation](https://docs.gitlab.com/ee/user/markdown.html) for a full list. We're adding to it all the time: as of our last release you can quickly [make an issue confidential](/releases/2018/07/22/gitlab-11-1-released/#confidential-issue-quick-action) right from the issue comment field. This was a community contribution, and we invite you to [contribute](/community/contribute/) functionality and quick actions you'd find useful too!\n\n## Benefits of using Markdown\n\nSome may be skeptical of using Markdown when there are other options – like a WYSIWYG editor. But the benefits of using markdown are hard to ignore:\n\n* Markdown is crazy versatile. It can be used for everything including (but not limited to) websites, notes, presentations, emails, and documents of all kinds.\n* Markdown isn’t picky about its operating system. You can create Markdown-formatted text on any device running any operating system.\n* Markdown can be used on the move, so to speak. Markdown-formatted text can be opened using virtually any application. You can also import your Markdown files into another Markdown application if you decide to make a change.\n* The Markdown text you create won’t become obsolete. 
Even if the application you’re using stops working down the line, you’ll still be able to read your Markdown-formatted text using a text editing application.\nThe fact that it is the backbone of so much web content means that you might be the odd one out if you DON’T use it.\n\n## How to get started with Markdown\n\nThere are a few ways you can learn about how to get started with Markdown.\n\nThe first is to check out online tutorials. You can find a number of resources on Markdown, including the [original guide by John Gruber](https://daringfireball.net/projects/markdown/) and a [Markdown Tutorial](https://www.markdowntutorial.com/) open-source website that you can use to try out Markdown in your web browser.\n\nOr, just try it out with the Notepad application on a device. Since Markdown is just plain text, you can write it in any text editor, such as Notepad. Save a file with the .MD file extension to make a proper Markdown file.\n\nThe second (and a highly encouraged) way to get the hang of Markdown is to check out some [free online Markdown editors](https://www.makeuseof.com/tag/online-markdown-editors/) to test the waters - many of which are great for just learning how to write in Markdown. Markdown editors like StackEdit and Dillinger can help your efforts to get started with Markdown.\n\nFor the most optimal Markdown experience, a writing app that's built for Markdown is typically the best way to go.\n",[9,696],{"slug":1073,"featured":6,"template":699},"gitlab-markdown-tutorial","content:en-us:blog:gitlab-markdown-tutorial.yml","Gitlab Markdown Tutorial","en-us/blog/gitlab-markdown-tutorial.yml","en-us/blog/gitlab-markdown-tutorial",{"_path":1079,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1080,"content":1086,"config":1092,"_id":1094,"_type":13,"title":1095,"_source":15,"_file":1096,"_stem":1097,"_extension":18},"/en-us/blog/gitlab-remote-ceo-shadow-takeaways",{"title":1081,"description":1082,"ogTitle":1081,"ogDescription":1082,"noIndex":6,"ogImage":1083,"ogUrl":1084,"ogSiteName":685,"ogType":686,"canonicalUrls":1084,"schema":1085},"GitLab CEO Shadow program takeaways and lessons learned","I attended every meeting with GitLab's CEO for two weeks. This is what I learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681331/Blog/Hero%20Images/all-remote-world-banner-1920x1080.png","https://about.gitlab.com/blog/gitlab-remote-ceo-shadow-takeaways","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CEO Shadow program takeaways and lessons learned\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Murph\"}],\n        \"datePublished\": \"2020-05-22\",\n      }",{"title":1081,"description":1082,"authors":1087,"heroImage":1083,"date":1088,"body":1089,"category":693,"tags":1090},[799],"2020-05-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n![GitLab all-remote mentor](https://about.gitlab.com/images/all-remote/ceo-shadow-gitlab-awesomeness.jpg){: .shadow.medium.center}\n\nI'm normally GitLab's Head of Remote, but for two weeks I attended every meeting with GitLab's CEO, alongside two wonderful co-shadows.\n\nThe final tally? 110 Zoom calls in 10 working days. That's an average of 11 video calls *per day*.\n\nCreating an ever more intriguing backdrop, I shadowed during a global pandemic (COVID-19). 
Not only was much of the world weeks into an extended period of isolation, but every leader on the planet was grappling with an array of factors that no one saw coming six months prior. From economic conditions to overall mental health and wellbeing, nothing was normal. Well, except one thing, which I'll address below.\n\n### What is the GitLab CEO Shadow program?\n\nIf you aren't familiar with GitLab's [CEO Shadow program](https://handbook.gitlab.com/handbook/ceo/shadow/), I won't rehash what's already in the handbook. By the time you read this, new shadows will have already improved the page since my rotation. It is hands-down the most enlightening, transparent, career-enhancing program I've taken part in, and should serve as a blueprint for other companies. Any firm could implement a shadow program, and I believe they should.\n\n### What's your biggest takeaway?\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4yhtYcOZn3w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n*In the [GitLab Unfiltered](https://www.youtube.com/playlist?list=PL05JrBw4t0Kq7QUX-Ux5fOunQotqJbECc) video above, GitLab's Head of Remote recaps his remote CEO Shadow rotation at GitLab in May 2020.*\n\nThis is the question I've received the most.\n\nThat bit in the intro about one thing being normal? GitLab's [values](https://handbook.gitlab.com/handbook/values/) were evident in **every** Zoom, and in **every** suggestion, decision, and bit of feedback. That part remained normal. What this means is that GitLab's values do not oscillate depending on economic conditions or other stressors.\n\nI want to be clear that these values were exemplified not *just* by GitLab's CEO, but by the entire [e-group](/company/team/structure/#e-group). The takeaway there is that GitLab's CEO carries a significant responsibility to live our values in his 1-on-1 interactions with his direct reports and board members, with that reinforcement cascading to other interactions throughout the organization.\n\nBelow, I'll share other takeaways from the program.\n\n### Remote work will soon be — simply — work\n\nIt's happening. From [Twitter](https://blog.twitter.com/en_us/topics/company/2020/keeping-our-employees-and-partners-safe-during-coronavirus.html) to [Square](https://www.theverge.com/2020/5/18/21261798/square-employees-work-from-home-remote-premanent-policy-ceo) to [Coinbase](https://blog.coinbase.com/post-covid-19-coinbase-will-be-a-remote-first-company-cdac6e621df7) to [Shopify](https://twitter.com/tobi/status/1263483496087064579) to [Facebook](https://www.theverge.com/facebook/2020/5/21/21265699/facebook-remote-work-shift-workforce-permanent-covid-19-mark-zuckerberg-interview), remote is now an option. My CEO Shadow rotation was just prior to the series of dominos above falling, and none of the Zooms were difficult. Even for suddenly-remote participants, it just worked. Everything just felt more human. One participant joined a Zoom from his daughter's school laptop and proceeded as \"Billie the Sea Turtle.\"\n\nI suspect some of these meetings would've been daunting in-person due to perceived power imbalances. In a video call, conversation is far more democratized.\n\n### Transparency is liberating\n\nFacades are mentally taxing. Just call it like it is.
I was heartened by the many sub-values surrounding this, which were lived out in interactions.\n\n* [It's impossible to know everything](https://handbook.gitlab.com/handbook/values/#its-impossible-to-know-everything)\n* [No ego](https://handbook.gitlab.com/handbook/values/#no-ego)\n* [Say sorry](https://handbook.gitlab.com/handbook/values/#say-sorry)\n* [Don't let each other fail](https://handbook.gitlab.com/handbook/values/#dont-let-each-other-fail)\n* [Blameless problem solving](https://handbook.gitlab.com/handbook/values/#blameless-problem-solving)\n* [Short toes](https://handbook.gitlab.com/handbook/values/#short-toes)\n* [Anyone and anything can be questioned](https://handbook.gitlab.com/handbook/values/#anyone-and-anything-can-be-questioned)\n\nIn one particular meeting, Sid had his mind changed through the introduction of new data. That led to a [merge request](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/50052/diffs) where we added a sub-value to Transparency — [Articulate when you change your mind](https://handbook.gitlab.com/handbook/values/#articulate-when-you-change-your-mind) — during my CEO Shadow rotation.\n\n*How awesome is that?*\n\n### Make a proposal\n\nThis one was big for me. Sid continually sought proposals from those in meetings. It made me realize that I could improve in this area. Now, whenever I have the urge to just talk unscripted about a project, I stop, open a Google Doc, and start writing.\n\nShowing up with a proposal rather than a collection of unsorted thoughts is a way to be [respectful of others' time](https://handbook.gitlab.com/handbook/values/#be-respectful-of-others-time).\n\nSid would rather have a weak proposal that he can shape than scattered thoughts on which no action can be taken. Since my rotation, I've found myself asking for more proposals and showing up with more proposals.\n\n### Shadows are more than shadows\n\nYou become a part of the ebb and flow of the day. You're with the CEO and your co-shadow *so much* that you can't help but bond. You laugh together, you grab lunch together, you share experiences together.\n\nYou're also in a very special position. You're able to [introduce yourself](https://handbook.gitlab.com/handbook/ceo/shadow/#ceo-shadow-introductions) as a member of the CEO Shadow program to people who are very difficult to get time with. You meet people outside of GitLab who are doing incredible things to create positive change in the world.\n\nI left with over a dozen new relationships, having met brilliant, passionate people all over the world who will remember me from \"that one time in GitLab's CEO Shadow program....\" That is remarkable.\n\n### Be succinct\n\nTalk in the details or not at all.\n\n### If a design is taking too long, break it down\n\n[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is the hardest value to practice.\n\nWe're conditioned to believe that breaking something down is done because we're lazy or incompetent. Nothing could be [further from the truth](https://handbook.gitlab.com/handbook/values/#low-level-of-shame).\n\n### Everyone loves a cancelled meeting\n\nWhen everyone is isolated and no one is experiencing travel delays, there's no excuse to cancel. In two weeks, I recall just one meeting being cancelled last-minute. Sid, my co-shadow, and I collectively looked at each other upon hearing that the participant had apologized and said: \"Don't apologize!\"\n\nWe all took a much-needed break, grabbed some water, and I took a quick walk outside.
It was glorious.\n\n### Sid loves the GitLab product\n\nIt is fascinating to watch Sid interact with all areas of the business. It's stunning how well-versed he is in everything from engineering to finance to legal, but what is abundantly clear is his passion for *product*. He is a self-proclaimed \"Product CEO,\" and you'll [see this in action](/handbook/ceo/#pointers-from-ceo-direct-reports) if you're a CEO Shadow.\n\n### The broader community is always top-of-mind\n\nOn any call where Sid is given the floor to explain what GitLab is, he reminds people how important the broader community is. He references instances where those outside of the GitLab organization contribute feedback, code, and improvements to our product and our handbook.\n\n\"[Everyone can contribute](/company/mission/#mission)\" is inserted into more conversations than you would assume.\n\n### Sid cares about the greatest number of people\n\nWhen's the last time you looked at the [Efficiency for the right group](https://handbook.gitlab.com/handbook/values/#efficiency-for-the-right-group) sub-value? The heart of that matters to Sid. When speaking of big, encompassing ideas, Sid continually asked those making the proposal to articulate outcomes for the broadest group.\n\n### Zoom fatigue is real\n\n110 video calls in two weeks is a lot. [Danielle M.](https://twitter.com/DanielleMorrill), an [alumnus](https://handbook.gitlab.com/handbook/ceo/shadow/#alumni) of the CEO Shadow program, recommended I take only essential meetings in the week immediately following my two-week CEO Shadow rotation to give my [eyes and mind a rest](https://www.bbc.com/worklife/article/20200421-why-zoom-video-chats-are-so-exhausting).\n\nBeing on the other side of the program, I *wholeheartedly* second this recommendation.\n\n### Being a CEO requires sacrifice\n\nSid doesn't live a normal life — or, what Americana would have you believe a normal life looks like. He sleeps well, he schedules time with family, and he takes weekends off of work. He models a healthy balance, all things considered. But make no mistake, a CEO carries around a massive amount of responsibility, and time for yourself is hard to come by.\n\nI think you have to be born for it. You can learn to be a CEO, but there's an intangible element there as well.\n\n### Reviewing is easier than creating\n\nSid explained to me that he's able to handle 11 back-to-back Zoom calls because \"reviewing is easier than creating.\" Which is true. In many of Sid's meetings, others have spent time creating so that Sid can review and provide feedback.\n\nIf you're in a creative role, be intentional about creating [white space](https://jkglei.com/white-space/) in your calendar to create.\n\nI view being booked at 100% as a risk. If this resonates with you, check out [Kingman's Formula](https://blog.acolyer.org/2015/04/29/applying-the-universal-scalability-law-to-organisations/) for the mathematics behind it.\n\n### GitLab is extraordinarily efficient\n\nYou can tell within 10 seconds if a meeting is being run [the GitLab way](/company/culture/all-remote/meetings/) or not.\n\n### Never apologize for family\n\nWith most of the world in lockdown, family was everywhere during my rotation (which was awesome!).
Sid continually [celebrated](https://handbook.gitlab.com/handbook/values/#make-family-feel-welcome) that, and reminded folks not to apologize for children, pets, or for having to dart out of a meeting to handle something on the homefront.\n\n[**#FamilyAndFriends1st**](https://handbook.gitlab.com/handbook/values/#family-and-friends-first-work-second)\n\n### Never have a meeting without an agenda\n\nUnless it's a [coffee chat](/company/culture/all-remote/informal-communication/#coffee-chats). In which case, *savor it*.\n\n![GitLab all-remote team](https://about.gitlab.com/images/all-remote/GitLab-All-Remote-Zoom-Team-Tanuki.jpg){: .shadow.medium.center}\n\n### Did you miss out on anything as a remote shadow?\n\nI am appreciative of the opportunity to shadow a CEO during a global pandemic. It would have been easy to pause the program for a bit. Instead, GitLab retained a beacon of transparency and [pivoted the program to remote](https://handbook.gitlab.com/handbook/ceo/shadow/#tips-for-remote-shadows) with poise. Kudos to Sid and Cheri Holmes, his [Executive Business Administrator](/handbook/eba/#executive-business-administrator-team).\n\nAs GitLab's Head of Remote, this was interesting on a personal level. Typically, the CEO Shadow program is a rare case where in-person attendance is expected. This is due to Sid's typical schedule, which involves quite a few in-person meetings in the San Francisco Bay Area. (I suspect this will change dramatically going forward, with many meetings being Zoom-by-default.)\n\nDue to much of the world being in lockdown, all of Sid's meetings in early May 2020 were remote and conducted via Zoom video calls.\n\nI didn't feel as if I missed out on anything. I was fully engaged and fully welcomed, and I was able to chat with Sid and my co-shadow during the occasional break.\n\n### Any notable perks of a remote CEO Shadow rotation?\n\nAs a dad, I felt fortunate to participate from home. The lack of a commute, even from hotel to [Mission Control](https://handbook.gitlab.com/handbook/ceo/shadow/#mission-control-guide), enabled me to maintain a fitness routine despite 11 meetings per day. I aspire to participate in a future rotation that's in-person, mostly for personal reasons. I love the San Francisco Bay Area and would take any opportunity to visit.\n\nThe time zone shift was a boon for me. I prefer to get outside in the morning, and being based 3 hours ahead of Sid provided a few bonus hours in the morning to do so.\n\n### Q&A\n\n![GitLab all-remote ergonomic workspace](https://about.gitlab.com/images/all-remote/gitlab-com-all-remote-v3-dark-1280x270.png){: .shadow.medium.center}\n\nI asked for questions on Twitter, as well as on GitLab's internal Slack. 
These are some of the inquiries I received.\n\n### How did you start your day?\n\nGifted with a 3-hour headstart from Sid's time zone, I typically rose with my family, had coffee, cleaned up my son's breakfast escapades, and squeezed in a 45-minute cardio session.\n\nOn perfect days, I'd sit down with 15 or 20 minutes to spare ahead of Sid's first call, enough time to see if my team needed anything before a marathon of documenting and learning.\n\nI'm used to working [non-linear days](/company/culture/all-remote/non-linear-workday/), so it aligned well with my preferences.\n\n### How did you align lunch breaks?\n\nYou figure out that you can get super creative on what is consumable in 180 seconds.\n\n### Did you engage in virtual social time together?\n\nWe'd hop on Zoom chats between calls, and after all calls were wrapped on certain days. I really enjoyed my last day, where Sid and my co-shadow joined for a 45-minute spontaneous coffee chat.\n\n### How hard is it to catch up after the CEO Shadow program?\n\nIn preparing for the program, you're instructed to \"[prepare your team as if you were on vacation](https://handbook.gitlab.com/handbook/ceo/shadow/#preparing-for-the-program).\" I did this, but checked in on notifications between calls.\n\nFor context, it was an unusual time. Given the global focus on remote, I was fielding interviews on the subject that were atypically time sensitive.\n\nDue to this, I didn't feel entirely disconnected upon return, but I was surely less rested.\n\nI was intentional about prioritizing the CEO Shadow experience and I muted all notifications for the entire two-week span. (I mute all Mac notifications anyway, and limit iPhone notifications to text messages, so this wasn't a drastic difference in routine.)\n\nI even [added a new task](https://gitlab.com/gitlab-com/ceo-shadow/onboarding/-/merge_requests/20/diffs) to our CEO Shadow onboarding issues to remind shadows to change their `GitLab.com` and Slack status messages to indicate that they're in the CEO Shadow program and are focused on the `#ceo-shadow` Slack channel.\n\n### Feelings before going to sleep prior to your first day?\n\nInvigorated. I participated in over 50 remote work webinars, podcasts, and interviews the six weeks prior to my rotation. I was super excited to completely shift gears and soak in something new.\n\n### Are Sid's meetings always on-time?\n\nRoughly 97% of them begin on time. Roughly 80% of them end on time. Roughly 99% of them end before the next one begins, even if it's just by a few seconds.\n\nMost are 25 minutes. A few are 50 minutes.\n\n### Did you actively participate in meetings?\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/JM8kBqqVFrU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n*In the [GitLab Unfiltered](https://www.youtube.com/playlist?list=PL05JrBw4t0Kq7QUX-Ux5fOunQotqJbECc) video above, GitLab's co-founder and CEO (Sid) and Head of Remote (Darren) chat with investor Sam Endacott during a remote CEO Shadow rotation.*\n\nSome! In meetings related to marketing, communications, messaging, and community, there were moments where I was asked to chime in. 
I was also asked a few questions in [1-on-1 meetings](/handbook/leadership/1-1/), the [e-group meeting](/handbook/ceo/#daily-meetings), and a [retrospective](/handbook/ceo/#monthly-meetings) meeting.\n\nSerendipitously, Sid and I were [interviewed](https://youtu.be/gOp4lKSCulI) by Adrian Larssen at Business Insider during my CEO Shadow rotation, while investor Sam Endacott allowed us to [livestream our conversation](https://youtu.be/JM8kBqqVFrU) on markets and remote work.\n\nI savored these moments. It was awesome to see \"everyone can contribute\" lived as a shadow. I felt welcome, and I felt my input was considered and appreciated.\n\nI enjoyed listening to my co-shadows give their input in meetings. It's an excellent opportunity to learn and develop in a very unique space.\n\n### Be honest — 40 hours per week, or more?\n\nSid's calendar is public for those within the GitLab organization. You can see that he tries to cap meetings at around 8 to 9 hours per day. However, given how much of what you experience is brand new, this may *feel* like more.\n\n![GitLab all-remote family workspace](https://about.gitlab.com/images/all-remote/gitlab-home-office-family.jpg){: .shadow.medium.center}\n\nIf you're considering the CEO Shadow program, check the [eligibility requirements](https://handbook.gitlab.com/handbook/ceo/shadow/#eligibility) and **apply**!\n\nIf you're outside of the GitLab organization, please encourage your leadership team to implement a similar program. I can't recommend it highly enough.\n\nIf you still have questions, reach out on Twitter: [@darrenmurph](https://twitter.com/darrenmurph)\n",[9,1091,9],"agile",{"slug":1093,"featured":6,"template":699},"gitlab-remote-ceo-shadow-takeaways","content:en-us:blog:gitlab-remote-ceo-shadow-takeaways.yml","Gitlab Remote Ceo Shadow Takeaways","en-us/blog/gitlab-remote-ceo-shadow-takeaways.yml","en-us/blog/gitlab-remote-ceo-shadow-takeaways",{"_path":1099,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1100,"content":1106,"config":1112,"_id":1114,"_type":13,"title":1115,"_source":15,"_file":1116,"_stem":1117,"_extension":18},"/en-us/blog/gitlab-vscode-extension",{"title":1101,"description":1102,"ogTitle":1101,"ogDescription":1102,"noIndex":6,"ogImage":1103,"ogUrl":1104,"ogSiteName":685,"ogType":686,"canonicalUrls":1104,"schema":1105},"A VS Code extension for GitLab: GitLab Workflow","Senior Frontend Engineer Fatih Acet created a VS Code extension, GitLab Workflow, which allows you to do many GitLab-specific tasks quickly and easily.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680196/Blog/Hero%20Images/vs-code-extension-gitlab-workflow.jpg","https://about.gitlab.com/blog/gitlab-vscode-extension","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A VS Code extension for GitLab: GitLab Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatih Acet\"}],\n        \"datePublished\": \"2018-03-01\",\n      }",{"title":1101,"description":1102,"authors":1107,"heroImage":1103,"date":1109,"body":1110,"category":762,"tags":1111},[1108],"Fatih Acet","2018-03-01","\n\nWe recently did a survey within the Frontend team to see which tools we were using and how we were using them, in order to learn from one another and to build better development workflows. Through this survey, we determined that [Visual Studio Code (VS Code)](https://code.visualstudio.com/) is the most used integrated development environment (IDE) within the team. 
This led to the idea for a GitLab extension for VS Code that could help reduce context switching and boost productivity.\n\nUpdate: Read [eight tips for using the GitLab VS Code extension](https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab/) and about [how GitLab + VS Code can be used for extension development](/blog/vscode-extension-development-with-gitlab/).\n{: .alert .alert-info .text-center}\n\nThis is not a [GitLab feature](/pricing/feature-comparison/) (we're actually working on building our own integrated [web IDE](https://docs.gitlab.com/ee/user/project/web_ide/)), but the extension is a quick and easy way to perform a lot of useful actions you would usually visit [GitLab.com](https://gitlab.com/) to do, directly within your VS Code editor. Watch the demo below and read on for more about how I developed the extension.\n\n## Demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XcxsF0lWBhA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## First iteration of GitLab Workflow\n\nThis was my first attempt at writing a VS Code extension, and I wanted to build something simple as a first iteration. I built an extension that allowed users to see issues and merge requests assigned to them on GitLab.com. The detailed documentation and powerful APIs of VS Code enabled me to build my first version in less than two hours! It was an enjoyable experience.\n\n## Further iterations\n\nThis led to the creation of my second iteration: showing MR URLs, providing the pipeline status on the status bar, opening the current file and current MR on GitLab.com. I shared this second iteration with my fellow GitLab team-members on our internal Slack and received a lot of positive feedback. After that, I released new iterations and it got more than 5,000 installations in just a month. It was so well received that it was featured on the \"Trending this week\" section of Visual Studio Marketplace and is still currently being featured on the \"Trending this month\" section 🎉\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/gitlab-vscode-extension/trending-this-month.png\" alt=\"GitLab Workflow on Visual Studio Marketplace\" style=\"width: 700px;\"/>\u003C/center>{: .shadow}\n\nThe current version of this extension allows you to:\n\n- See pipeline status, open MR and close issue links in the status bar. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#status-bar).\n- Automatically update pipeline status on the status bar so you don't need to open GitLab to see your pipeline status.\n- Advanced pipeline actions allow you to view a pipeline on GitLab, create a new pipeline, and retry or cancel current pipeline. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#pipeline-actions).\n- Issue and MR search including simple and advanced search. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#advanced-search).\n- View an MR and close an issue on GitLab with a single click from your status bar.\n- View an active file on GitLab with highlighting active line number and selected text block. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#open-active-file).\n- Create public, internal or private snippet from entire file or selection. 
[Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#create-snippet).\n- Compare your branch with master and view changes on GitLab. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#compare-with-master).\n- Validate GitLab CI configuration file `.gitlab-ci.yml`. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#validate-gitlab-ci-configuration).\n\nSee below for more tasks you can perform quickly with the extension.\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/gitlab-vscode-extension/gitlab-vscode.png\" alt=\"GitLab Workflow Commands\" style=\"width: 700px;\"/>\u003C/center>{: .shadow}\n\nYou can find the source code [here](https://gitlab.com/fatihacet/gitlab-vscode-extension) and see the extension [on the Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=fatihacet.gitlab-workflow). You can read the documentation [here](https://docs.gitlab.com/ee/user/project/repository/vscode.html) and check the CHANGELOG [here](https://gitlab.com/fatihacet/gitlab-vscode-extension/blob/master/CHANGELOG.md). There is also a [Product Hunt page](https://www.producthunt.com/posts/gitlab-workflow) for the extension.\n\nPhoto by [Iker Urteaga](https://unsplash.com/photos/TL5Vy1IM-uA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/tools?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,721,232],{"slug":1113,"featured":6,"template":699},"gitlab-vscode-extension","content:en-us:blog:gitlab-vscode-extension.yml","Gitlab Vscode Extension","en-us/blog/gitlab-vscode-extension.yml","en-us/blog/gitlab-vscode-extension",{"_path":1119,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1120,"content":1126,"config":1133,"_id":1135,"_type":13,"title":1136,"_source":15,"_file":1137,"_stem":1138,"_extension":18},"/en-us/blog/gitlab-workflow-with-jira-jenkins",{"title":1121,"description":1122,"ogTitle":1121,"ogDescription":1122,"noIndex":6,"ogImage":1123,"ogUrl":1124,"ogSiteName":685,"ogType":686,"canonicalUrls":1124,"schema":1125},"Demo: GitLab + Jira + Jenkins","See how you can use our Jira and Jenkins integrations to reduce context switching and streamline your workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680048/Blog/Hero%20Images/gitlab-jira-jenkins-cover.png","https://about.gitlab.com/blog/gitlab-workflow-with-jira-jenkins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo: GitLab + Jira + Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joel Krooswyk\"}],\n        \"datePublished\": \"2018-07-30\",\n      }",{"title":1121,"description":1122,"authors":1127,"heroImage":1123,"date":1129,"body":1130,"category":300,"tags":1131},[1128],"Joel Krooswyk","2018-07-30","\n\nOne of the things we love about GitLab is that while it can replace all your other software development lifecycle tools [(no, really)](/), it doesn't have to. Whether you want to rip and replace everything or use it for one or two stages of your workflow, [alongside your existing toolset](/partners/technology-partners/integrate/) (for now, or forever), we've got you covered.\n\nOne of the things we're most often asked about is how GitLab works together with [Jira](/solutions/jira/) for issue tracking, and [Jenkins](/solutions/jenkins/) for CI. This could be for one of two reasons:\n\n1. 
Your organization is happy with your issue tracking and CI solutions, and just wants to use GitLab for other features, or\n2. You plan to move to GitLab for your end-to-end software development lifecycle, but that's a significant undertaking and it may be less disruptive to migrate on a project-by-project basis.\n\nNo matter the reason, what's important is maintaining the context of work without having to switch between applications frequently. With these integrations, you can transition Jira issue states via GitLab, as well as see GitLab commits, branches, and merge requests in the Jira development panel. You can also view the status of Jenkins pipelines in GitLab to optimize your use of GitLab Merge Requests.\n\nI recorded this demo to show what a workflow using all three would look like.\n\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Jn-_fyra7xQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[718,9,232,1132],"workflow",{"slug":1134,"featured":6,"template":699},"gitlab-workflow-with-jira-jenkins","content:en-us:blog:gitlab-workflow-with-jira-jenkins.yml","Gitlab Workflow With Jira Jenkins","en-us/blog/gitlab-workflow-with-jira-jenkins.yml","en-us/blog/gitlab-workflow-with-jira-jenkins",{"_path":1140,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1141,"content":1146,"config":1153,"_id":1155,"_type":13,"title":1156,"_source":15,"_file":1157,"_stem":1158,"_extension":18},"/en-us/blog/gke-webcast-recap-post",{"title":1142,"description":1143,"ogTitle":1142,"ogDescription":1143,"noIndex":6,"ogImage":944,"ogUrl":1144,"ogSiteName":685,"ogType":686,"canonicalUrls":1144,"schema":1145},"Scalable app deployment with GitLab and Google Cloud Platform","Get the power to spin up a Kubernetes cluster managed by Google Cloud Platform in a few clicks – watch the demo of our native integration.","https://about.gitlab.com/blog/gke-webcast-recap-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scalable app deployment with GitLab and Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-05-10\",\n      }",{"title":1142,"description":1143,"authors":1147,"heroImage":944,"date":1149,"body":1150,"category":762,"tags":1151},[1148],"Suri Patel","2018-05-10","\n\nThe GitLab + Google Kubernetes Engine integration's versatility speeds up software development and delivery while maintaining security and scale, allowing developers to focus on building apps instead of managing infrastructure. William Chia, Senior Product Marketing Manager at GitLab, and guest speaker William Denniss, Product Manager at Google, recently met to discuss the benefits of the integration.\n\n- [What is the GitLab GKE integration?](#what-is-the-gitlab-gke-integration)\n- [What's in the webcast?](#whats-in-the-webcast)\n- [Watch the recording](#watch-the-recording)\n- [Key takeaways](#key-takeaways)\n- [Webcast Q&A](#webcast-qa)\n\n## What is the GitLab GKE integration?\n\nWith our native Google Kubernetes Engine integration, you can automatically spin up a cluster to deploy applications, with just a few clicks. Simply connect your Google account, enter a few details, and GitLab will create the clusters for you. 
The clusters are fully managed by Google and run on Google Cloud Platform’s best-in-class infrastructure.\n\n## What's in the webcast\n\nWilliam Chia, Senior Product Marketing Manager at GitLab, and William Denniss, Product Manager at Google, explain how to deploy applications at scale using GKE and GitLab’s robust Auto DevOps capabilities.\n\nWe start with a crash course in Kubernetes, examining containers and deployment, before taking a closer look at the [Google Kubernetes Engine integration](/partners/technology-partners/google-cloud-platform/) and seeing it in action.\n\n## Watch the recording\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uWC2QKv15mk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n#### A seamless collaboration\n\n>Using GitLab with GKE creates an environment in which you just need to merge your code, and GitLab does all the rest. - William Chia, GitLab Senior Product Marketing Manager\n\n#### Kubernetes for success\n\n>If you go with Kubernetes, it gives you a good start. You can hit a button and configure GKE to do it for you and scale massively when you need to. It really sets you up for success. GitLab is a really great way to get started with Kubernetes, because it sets up everything nicely for you in an automated way. - William Denniss, Google Product Manager\n\n## Webcast Q&A\n\nDuring the webcast, live participants chatted in questions to the team. Here are some of the answers that were given via chat along with several questions we didn’t get a chance to answer during the webcast.\n\n>Does Kubernetes have a built-in load balancer?\n\nIt does have support for load balancing across pods within a service. You may also need an external load balancer, in the event you have multiple nodes. Creating a [Kubernetes Service object](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster) and an [external load balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer) are great first steps.\n\n>Is it possible to deploy multiple projects in the same Kubernetes cluster?\n\nIt is, you can add the cluster manually to additional projects. We are also working to make this easier in our UI, with [support for defining clusters at the group level](https://gitlab.com/gitlab-org/gitlab-ce/issues/34758).\n\n>So coming back to the setup of a cluster. If you have a separate environment for development, test, acceptance, and production, it seems we would have multiple options, like multiple clusters, or one cluster with multiple environments. Or even one cluster, one environment and point the correct environment in the `.gitlab-ci.yml` file (environment page in GitLab). What do you recommend to use to have a nice CI/CD integration and still separate environments?\n\nWe support integrating multiple clusters into a single project, and you can define which environments should be deployed to which clusters by [using the environment scope](https://docs.gitlab.com/ee/user/project/clusters/#setting-the-environment-scope).\n\n>Is it possible to add several clusters to the same project? To isolate environments based on clusters rather than namespaces.\n\nYes, this is a feature of GitLab Premium/Silver. (Note: Open source projects on GitLab.com get all of the features of our top-tier plan for free. 
Public projects on GitLab.com also have this capability.)\n\n>Does GitLab support on-demand cluster creation for integration testing for QA environments?\n\nWe support the integration of multiple clusters, and you can define which cluster each environment should be deployed to. For example, you can state that all review apps should be deployed into one cluster. If you would like to dynamically create a cluster during a test, you of course can do that as well by scripting that in a job.\n\n>Are these features available on GitLab CE?\n\nCluster integration and the main Auto DevOps functionality are available in Core (CE or EE without a license). Some jobs do require Premium, and they are noted in our [Auto DevOps documentation](https://docs.gitlab.com/ee/topics/autodevops/#stages-of-auto-devops).\n\n>The test stages are paid features, right?\n\nMany test jobs are open source features available in Core, and indeed some do require an paid license. The requirements for each job are noted in our [Auto DevOps documentation](https://docs.gitlab.com/ee/topics/autodevops/#stages-of-auto-devops).\n\n>What did you mean: “You can run Enterprise Edition without a license?”\n\nGitLab Enterprise Edition uses a license key to grant you access to the features of the Starter, Premium, and Ultimate plans. If you install Enterprise Edition and don’t have a license key, then you will get access to all of the Core features.\n\n[Learn more about GitLab's tiers](/blog/gitlab-tiers/).\n\n[Learn if you should use Community Edition or Enterprise Edition](/install/ce-or-ee/).\n\n>Is there a free version of GKE for testing and learning?\n\nEvery new Google Cloud Platform account receives $300 in credit upon [signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, GitLab is able to offer an additional $200 for new GCP accounts to get started with GitLab’s GKE Integration. This allows you ample usage to test and learn for free.  Visit the Google partner credit page to apply for the $200 additional credit.\n\n>I see there is a $200 credit for playing around with GitLab and GKE. Can you elaborate on that? How to receive it, etc... Is it available for personal use or for professional use only? A contact form opens that wants my professional email address.\n\nThe $200 partner credit is intended for professional use. You can apply by visiting the Google Cloud Platform [partner page](https://cloud.google.com/partners/partnercredit/?PCN=a0n60000006Vpz4AAC) and filling out the form. You'll receive an email from the Google team with a key to redeem your credit.\n\n>Will Prometheus also gather the metrics without Auto DevOps, for example our own `.gitlab-ci.yml`? Or do we need to get something from the DevOps template?\n\nWe detect common system services like the NGINX Ingress or Kubernetes CPU/Memory metrics. If you use the NGINX Ingress deployed from GitLab, it is automatically configured for exporting Prometheus metrics. Additional documentation is available in our [Prometheus documentation](https://docs.gitlab.com/ee/user/project/integrations/prometheus_library/nginx_ingress.html).\n\n>Will you also support AWS?\n\nOther providers are certainly items we are considering for future releases, but we started with GKE since we felt it has the best managed Kubernetes experience available today. 
Other clusters can always be added manually, with just a few extra steps.\n\n>What if GitLab is running on GKE itself, can you connect the app to the same Kubernetes cluster GitLab is running on? And how safe is it to run this auto-deployment on your existing Kubernetes clusters/cluster GitLab is running on? Looks as if you could easily waste your cluster with this.\n\nIf you’re running GitLab on GKE, you can definitely connect it to the same cluster GitLab is running on to execute your GitLab runners, and as the deployment target for Auto DevOps. I’d advise to use separate namespaces for your GitLab instance to avoid any interference.\n\nNamespaces are the key to achieving workload isolation in Kubernetes; they provide isolation between different deployments to avoid one accidentally influencing the other. If you like (and it’s a bit more configuration), you can even use RBAC to prevent any developer pipelines from ever touching production.\n\nIf you want total isolation, then create a separate GCP project, with a separate cluster for production :) This is definitely the best practice for larger deployments.\n\n>I have been playing around with the `dependency_scanning`/`sast`/`dast` jobs, but the images are not cached on the runner. Will they be cached in (near) future or do we need to add any configuration?\n\nWe use Docker-in-Docker for most of these jobs, so caching is a bit tricky, and we have an [issue tracking this](https://gitlab.com/gitlab-org/gitlab-ce/issues/17861).\n\n>What does GitLab use to create the container image?\n\nAuto DevOps uses Herokuish and Heroku buildpacks to automatically detect and build the application into a Docker image. If you add a Dockerfile to your repo, GitLab will use docker build to create a Docker image.\n\n>Does the GKE/Kubenetes integration require the GitLab installation to be publicly accessible from the internet? Or will it work just as well if the GitLab server is private?\n\nIt does not, but if you deploy a runner to the cluster it will need to be able to access the GitLab server to pick up jobs and do its Git clones.\n\n>How does one manage to different `.env` files for different environments with GitLab CI?\n\nIf you define environment variables at the project level, you can specify which ones are available for which environments by following the [documentation on limiting environment scopes](https://docs.gitlab.com/ee/ci/variables/#limiting-environment-scopes-of-secret-variables).\n\n>What do I do when I receive this error: “We could not verify that one of your projects on GCP has billing enabled. Please try again.”\n\nPlease read the second bullet on the [GCP billing on the documentation page](https://docs.gitlab.com/ee/user/project/clusters/#adding-and-creating-a-new-gke-cluster-via-gitlab), which should help ensure that billing is set up for your account.\n\n>Is there a setting to control the number of review apps which are running live at any given time? Worried about cost.\n\nNote that review apps only run on open Merge Requests. If you are using the Auto DevOps template, then once the code is merged, or the MR is closed, the review app shuts down. Today, there’s not a feature to limit the number of review apps, but there are a few options. Review app environments can be manually stopped from both the MR and the environments page. 
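\n\nIf you maintain your own `.gitlab-ci.yml` rather than relying on the Auto DevOps template, a review app is typically declared together with an explicit, manually triggered stop job, along these lines (a sketch with illustrative script names):\n\n```\nreview:\n  stage: review\n  script:\n    - ./deploy-review.sh   # illustrative deploy script\n  environment:\n    name: review/$CI_COMMIT_REF_NAME\n    on_stop: stop_review\n\nstop_review:\n  stage: review\n  script:\n    - ./teardown-review.sh   # illustrative teardown script\n  environment:\n    name: review/$CI_COMMIT_REF_NAME\n    action: stop\n  when: manual\n```\n\nStopping the `review` environment from the MR or the environments page runs the `stop_review` job.\n\n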
You can also disable review apps altogether.\n\n>What are requirements for installing the one-click applications to the cluster?\n\nHelm Tiller, Ingress, Prometheus, and GitLab Runner don't have any special requirements to install via one-click. The integration takes care to ensure the appropriate container images are used and everything is configured properly. The only prerequisite is to install Helm Tiller first (since it is used to install the other applications.) If you install these applications manually to your cluster, you can learn about the requirements for each on their respective documentation pages.\n\n>Does this replace solutions like Rancher?\n\nIn a nutshell, yes, the GitLab GKE integration provisions and manages clusters on GKE, alleviating the need for Rancher. But this also depends on your needs. You can use GitLab with or without Rancher. For example, if you are using AKS or EKS, then Rancher will provision and manage your cluster automatically, while this requires manual configuration on GitLab.\n\n>What is the current state of installing GitLab on Kubernetes?\n\nGitLab has two Helm charts for installing GitLab on Kubernetes – the GitLab-Omnibus chart and the cloud native GitLab chart.\n\nGitLab-Omnibus: The best way to run GitLab on Kubernetes today, suited for small deployments. The chart is in beta and will be deprecated by the cloud native GitLab chart.\nCloud native GitLab chart: The next generation GitLab chart, currently in alpha. Will support large deployments with horizontal scaling of individual GitLab components. For more information, please visit [the GitLab Helm chart documentation page](https://docs.gitlab.com/charts/).\n\n>How usable is the new Helm chart for GitLab on Kubernetes?\n\nIt is in alpha, and we plan to have a beta available in May/June. We created [an issue](https://gitlab.com/groups/charts/-/epics/17) to note the items we are working to address before beta.\n\n>How can I enable Auto DevOps if I have `gitlab-ci.yml` file already, but for only build and test?\n\nAuto DevOps will use your custom `gitlab-ci.yml` file if it is present in your repo. If there is no file, then Auto DevOps will use the default Auto DevOps template. You can also see the [Auto DevOps template `gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-ci-yml/blob/master/Auto-DevOps.gitlab-ci.yml) and use it as a reference to add/update your `gitlab-ci.yml`. For more information, please visit [the customizing `.gitlab-ci.yml` documentation page](https://docs.gitlab.com/ee/topics/autodevops/#customizing-gitlab-ci-yml).\n\nHave you tried the GitLab + GKE integration? 
Tweet us [@gitlab](https://twitter.com/gitlab).\n",[954,953,720,1152,9],"webcast",{"slug":1154,"featured":6,"template":699},"gke-webcast-recap-post","content:en-us:blog:gke-webcast-recap-post.yml","Gke Webcast Recap Post","en-us/blog/gke-webcast-recap-post.yml","en-us/blog/gke-webcast-recap-post",{"_path":1160,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1161,"content":1167,"config":1173,"_id":1175,"_type":13,"title":1176,"_source":15,"_file":1177,"_stem":1178,"_extension":18},"/en-us/blog/how-to-agentless-gitops-aws",{"title":1162,"description":1163,"ogTitle":1162,"ogDescription":1163,"noIndex":6,"ogImage":1164,"ogUrl":1165,"ogSiteName":685,"ogType":686,"canonicalUrls":1165,"schema":1166},"How to Use Push-Based GitOps with Terraform & AWS ECS/EC2","Learn how GitLab supports agentless approach for GitOps on AWS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/how-to-agentless-gitops-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for GitOps with Terraform and AWS ECS and EC2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-08-10\",\n      }",{"title":1168,"description":1163,"authors":1169,"heroImage":1164,"date":1170,"body":1171,"category":762,"tags":1172},"How to use a push-based approach for GitOps with Terraform and AWS ECS and EC2",[738],"2021-08-10","In [part two of our GitOps\nseries](/blog/how-to-agentless-gitops-vars/), we described how to\nuse a push-based (or agentless) approach for [GitOps](/topics/gitops/) by\nusing GitLab scripting capabilities as well as integrating\ninfrastructure-as-code tools into GitOps pipelines. In this third blog post,\nwe’ll also dig deep into how to use a push-based approach, but this time our\nfocus will be on the integrations of Terraform, AWS ECS, and AWS EC2 in\nGitOps flows. This approach may be preferable when using infrastructure\ncomponents that aren't Kubernetes, such as VMs, physical devices, and\ncloud-provider services.\n\n\nSimilar to Ansible – an agentless IT automation solution – Terraform can be\nleveraged by the scripting capabilities of GitLab to shape your\ninfrastructure. GitLab also provides out-of-the-box integrations with\nTerraform, such as GitLab-managed Terraform state and Terraform plan reports\nin merge requests.\n\n\n## GitOps flows with GitLab and Terraform\n\n\nIn this section, we explain how to use GitLab and Terraform for a\nnon-Kubernetes GitOps flow and Kubernetes GitOps.\n\n\n### GitLab and Terraform for non-K8s infrastructure\n\n\nGitLab leverages Terraform to provision a non-Kubernetes infrastructure\ncomponent, namely a MySQL database running on AWS.\n\n\nNote: Ideally, the provisioning of a database should be an on-demand,\nself-service process that developers can just use. We use this scenario to\nillustrate a GitOps flow using a non-Kubernetes infrastructure component.\n\n\n#### How collaboration works in GitLab\n\n\nSasha, a developer, creates an issue and assigns the issue to Sidney, the\ndatabase administrator, who then creates a Merge Request (MR) to start her\nwork and invite collaboration with other stakeholders across the\norganization. Opening the MR automatically creates a feature branch for the\nGitLab project. 
Sidney uses Terraform to create an infrastructure-as-code\nconfiguration for the database, named `mysqlmain.tf`. The database happens\nto be an AWS RDS MySQL instance. The database Terraform configuration file\nshould look like this:\n\n\n![Terraform configuration file for MySQL\ndatabase](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/0-tf-mysqlmain-created.png){:\n.shadow.small.center.wrap-text}\n\nTerraform configuration file for MySQL database.\n\n{: .note.text-center}\n\n\nTake note of the version of the database (`engine_version`), the database\nstorage (`allocated_storage`), and the embedded database admin user\n(`username`) and password, in the image above.\n\n\nAs soon as Sidney adds the `mysqlmain.tf` file to the feature branch, a\npipeline is automatically executed by GitLab in the MR. As part of the\nreview process, a \"Terraform plan\" is executed against the Terraform files\nand the output is attached to the MR as an artifact:\n\n\n![Terraform plan output attached to Merge\nRequest](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/1-tf-report-in-MR.png){:\n.shadow.small.center.wrap-text}\n\nTerraform plan output attached to MR.\n\n{: .note.text-center}\n\n\nIn the picture above, you can see the note \"1 Terraform report was generated\nin your pipelines\". You can click on the `View full log` button to see the\noutput file of the \"Terraform plan\" command that was run against the new\nconfiguration file, as seen below:\n\n\n![Terraform plan output detailed log\nview](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/2-tf-plan-output.png){:\n.shadow.small.center.wrap-text}\n\nTerraform plan output detailed log view.\n\n{: .note.text-center}\n\n\nThe Terraform output shows that a database will be created once this\nconfiguration file is applied to the infrastructure. The artifacts attached\nto an MR provide information that can help stakeholders review the proposed\nchanges. The Terraform output in the MR fosters collaboration between\nstakeholders, and leads to infrastructure that is more consistent,\nresilient, reliable, and stable, and helps prevent unscheduled outages.\n\n\nIn the image below, we see how reviewers can collaborate in GitLab. The\nscreenshot shows that the original requester, Sasha, notices that a database\nstorage of 5 GB is too small, so she makes an inline suggestion to increase\nthe database storage capacity to 10 GB.\n\n\n![Inline suggestion to increase database storage to\n10GB](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/3-tf-inline-suggestion-by-Sasha.png){:\n.shadow.small.center.wrap-text}\n\nInline suggestion to increase database storage to 10GB.\n\n{: .note.text-center}\n\n\nInline suggestions foster collaboration and help increase developer\nproductivity because suggested changes can be added with the click of a button.\n\n\nNext, Sidney invites DevOps engineer Devon to collaborate on the MR. Devon\nnotices that the database version in the configuration file is not the\nlatest one. 
He proceeds to make an inline suggestion proposing a more\nup-to-date version for Sidney to review:\n\n\n![Inline suggestion to update database\nversion](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/4-tf-inline-suggestion-by-Devon.png){:\n.shadow.small.center.wrap-text}\n\nInline suggestion to update database version.\n\n{: .note.text-center}\n\n\nSidney can monitor the discussion between code reviewers on the MR by\ntracking the number of unresolved threads. So far, there are four unresolved\nthreads:\n\n\n![Number of unresolved threads displayed at the top of the\nMR](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/5-tf-unresolved-threads-for-Sidney.png){:\n.shadow.small.center.wrap-text}\n\nNumber of unresolved threads displayed at the top of the MR.\n\n{: .note.text-center}\n\n\nSidney starts resolving the threads by following the convenient thread\nnavigation provided by GitLab, which makes it easy for her to process each\nof the proposed review items. Sidney just needs to click \"Apply suggestion\"\nto accept an input from a reviewer:\n\n\n![Applying a suggestion with a single button\nclick](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/6-tf-apply-inline-suggestion-by-Sidney.png){:\n.shadow.small.center.wrap-text}\n\nApplying a suggestion with one click.\n\n{: .note.text-center}\n\n\nDevon suggested replacing the embedded database admin username and password\nwith a parameter in the inline review, so Sidney replaces the embedded\nvalues with variables. The variable values will be managed by masked\nvariables within GitLab:\n\n\n![Parameterizing variables in Terraform configuration\nfile](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/7-tf-parameterizing-vars-by-Sidney.png){:\n.shadow.small.center.wrap-text}\n\nParameterizing variables in Terraform configuration file.\n\n{: .note.text-center}\n\n\nOnce the threads are resolved and the stakeholders involved in thh MR finish\ncollaborating, it's time to merge.\n\n\nLearn more about how GitLab fosters collaboration using the principles of\nGitOps in the video below:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/onFpj_wvbLM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n\nIn this next example, Sasha is the one merging the MR:\n\n\n![Merge Request with infrastructure updates being\nmerged](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/8-tf-MR-merged.png){:\n.shadow.small.center.wrap-text}\n\nMR with infrastructure updates being merged.\n\n{: .note.text-center}\n\n\nMerging automatically launches a pipeline that will apply the changes to the\ninfrastructure:\n\n\n![GitOps pipeline completed\nexecution](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/9-tf-pipeline-complete.png){:\n.shadow.small.center.wrap-text}\n\nGitOps pipeline completed execution.\n\n{: .note.text-center}\n\n\n#### CI/CD with non-K8s infrastructure\n\n\nThe CI/CD pipeline in the previous example works by validating the\ninfrastructure configuration files. Then the pipeline validates the proposed\nupdates against the current state of the infrastructure. 
Finally, it applies\nthe updates to the production infrastructure.\n\n\nRunning this GitOps flow results in a brand new MySQL database on AWS RDS:\n\n\n![A new MySQL database has been created via a GitOps\nflow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/10-db-ready.png){:\n.shadow.small.center.wrap-text}\n\nA new MySQL database has been created via a GitOps flow.\n\n{: .note.text-center}\n\n\nBy checking the details of the new MySQL database you can corroborate that\nthe database storage is 10 GB and that the database version is the most\ncurrent:\n\n\n![Resulting MySQL database configuration from the collaboration of\nstakeholders](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/11-db-version-and-10g-storage.png){:\n.shadow.small.center.wrap-text}\n\nThe MySQL database configuration built by team member collaboration.\n\n{: .note.text-center}\n\n\nIn the next section, we look at how a similar GitOps flow can be applied to\na Kubernetes cluster.\n\n\n### GitLab and Terraform for K8s infrastructure\n\n\nWe skip past all the collaboration steps to focus on a change to the EKS\ncluster Terraform configuration file. In the picture below, a user is\nchanging the minimum size of the autoscaling group of the EKS cluster from\none to two:\n\n\n![Raising autoscaling group minimum to\n2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/12-worker-nodes-to-two.png){:\n.shadow.small.center.wrap-text}\n\nIncreasing autoscaling group minimum to two.\n\n{: .note.text-center}\n\n\nWhen the stakeholder commits the change in the MR, a CI/CD pipeline\nvalidates the configuration, performs a plan against production, and applies\nthe updates to the production infrastructure. After the pipeline finishes,\nthe user can log into the Amazon EC2 console to verify that the EKS cluster\nnow has a minimum of two nodes in its autoscaling group:\n\n\n![GitOps flow modified the number of worker nodes in K8s\ncluster](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/13-two-worker-nodes-on-AWS.png){:\n.shadow.small.center.wrap-text}\n\nGitOps flow modified the number of worker nodes in K8s cluster.\n\n{: .note.text-center}\n\n\nSee this scenario in action by watching the [GitOps\npresentation](/topics/gitops/gitops-multicloud-deployments-gitlab/) on our\nGitOps topics page.\n\n\n## GitOps flows for non-K8s (like ECS, EC2)\n\n\nGitLab also provides Auto Deploy capabilities to streamline application\ndeployment to ECS and EC2, so you can shape infrastructure as desired.\n\n\n### Deploying to Amazon ECS\n\n\nAfter creating your ECS cluster, GitLab can deliver your application and its\ninfrastructure to the cluster by including the ECS Deployment template in\nyour `.gitlab-ci.yml`, using CI/CD:\n\n\n```\ninclude:\n  - template: AWS/Deploy-ECS.gitlab-ci.yml\n```\n\n\nNext, create the `ECS Task Definition` file in your project that specifies\nyour app's infrastructure requirements, along with other details.\n\n\n![ECS Task Definition file\nsnippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/14-ECS-taskdef-file.png){:\n.shadow.small.center.wrap-text}\n\nECS Task Definition file snippet.\n\n{: .note.text-center}\n\n\nFinally, define the project variables that drive the template:\n\n\n![Project variables required to auto-deploy to\nECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/15-ECS-related-vars.png){:\n.shadow.small.center.wrap-text}\n\nProject variables required to 
auto-deploy to ECS.\n\n{: .note.text-center}\n\n\nThe ECS deployment template does the rest, including support review\npipelines.\n\n\n![Review pipeline in GitOps\nflow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/16-ECS-review-pipeline.png){:\n.shadow.small.center.wrap-text}\n\nReview pipeline in GitOps flow.\n\n{: .note.text-center}\n\n\nIn the review pipeline above, stakeholders can review the proposed changes\nbefore sending to production. The two screenshots below show different\naspects of the proposed changes in the log output of the `review_fargate`\njob:\n\n\n![Configuring load balancers in\nECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/17-review-fargate-log-begin.png){:\n.shadow.small.center.wrap-text}\n\nConfigure load balancers in ECS.\n\n{: .note.text-center}\n\n\nSee the configuration for infrastructure components like load balancers in\nthe image above. The image below shows infrastructure components like\nsubnets, security groups, and the assignment of a public IP address:\n\n\n![Configuring subnets, security groups in\nECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/18-review-fargate-log-middle.png){:\n.shadow.small.center.wrap-text}\n\nConfiguring subnets and security groups in ECS.\n\n{: .note.text-center}\n\n\nOnce all stakeholders are done collaborating on a proposed change to the\nproduction infrastructure, the updates are applied using a CI/CD pipeline.\nBelow is an example of this type of pipeline:\n\n\n![Applying infrastructure updates to\nproduction](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/19-ECS-prod-pipeline.png){:\n.shadow.small.center.wrap-text}\n\nApplying infrastructure updates to production.\n\n{: .note.text-center}\n\n\nRead our documentation to learn more about [how GitLab users can Auto Deploy\nto\nECS](https://docs.gitlab.com/ee/ci/cloud_deployment/#deploy-your-application-to-the-aws-elastic-container-service-ecs).\n\n\n### Deploying to Amazon EC2\n\n\nGitLab also provides a built-in template to provision infrastructure and\ndeploy your applications to EC2 as part of Auto DevOps. The template:\n\n\n- Provisions infrastructure using AWS CloudFormation\n\n- Pushes application to S3\n\n- Deploys your application from S3 to EC2\n\n\nEach of these steps requires a JSON configuration file. 
Below is an example\nof a portion of a CloudFormation Stack JSON file used to create your\ninfrastructure:\n\n\n![CloudFormation stack JSON\nsnippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/20-EC2-portion-stack-file.png){:\n.shadow.small.center.wrap-text}\n\nCloudFormation stack JSON snippet.\n\n{: .note.text-center}\n\n\nThe JSON used by the Auto Deploy template to push your application to S3\nwould look similar to this:\n\n\n![JSON to push application to\nS3](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/21-EC2-push-file.png){:\n.shadow.small.center.wrap-text}\n\nJSON to push application to S3.\n\n{: .note.text-center}\n\n\nAnd the file used for the actual deployment of your application from S3 to\nEC2 would be like the following:\n\n\n![JSON to deploy application to\nEC2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/22-EC2-deploy-file.png){:\n.shadow.small.center.wrap-text}\n\nJSON to deploy application to EC2.\n\n{: .note.text-center}\n\n\nAfter creating these files, you need to create the following variables in\nyour project - displayed here with some sample values:\n\n\n```\n\nvariables:\n  CI_AWS_CF_CREATE_STACK_FILE: 'aws/cf_create_stack.json'\n  CI_AWS_S3_PUSH_FILE: 'aws/s3_push.json'\n  CI_AWS_EC2_DEPLOYMENT_FILE: 'aws/create_deployment.json'\n  CI_AWS_CF_STACK_NAME: 'YourStackName'\n```\n\n\nThe last step is to include the template in your `.gitlab-ci.yml` file:\n\n\n```\n\ninclude:\n  - template: AWS/CF-Provision-and-Deploy-EC2.gitlab-ci.yml\n```\n\n\nMore details on [how GitLab uses Auto Deploy to EC2 are available in the\ndocumentation](https://docs.gitlab.com/ee/ci/cloud_deployment/#provision-and-deploy-to-your-aws-elastic-compute-cloud-ec2).\n\n\n## Agent or agentless: GitLab has your GitOps flows covered\n\n\nWhether your situation calls for an agent-based/pull-approach to doing\nGitOps, or for an agentless/push-approach, GitLab has your back. GitLab\noffers the flexibility to choose the approach to GitOps that best fits your\nspecific projects or applications. 
GitLab also supports many types of\ninfrastructures – from physical components and virtual machines, Kubernetes\nand containers, as well as infrastructure-as-code tools like Terraform,\nAnsible, and AWS Cloud Formation.\n",[549,743,9],{"slug":1174,"featured":6,"template":699},"how-to-agentless-gitops-aws","content:en-us:blog:how-to-agentless-gitops-aws.yml","How To Agentless Gitops Aws","en-us/blog/how-to-agentless-gitops-aws.yml","en-us/blog/how-to-agentless-gitops-aws",{"_path":1180,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1181,"content":1187,"config":1193,"_id":1195,"_type":13,"title":1196,"_source":15,"_file":1197,"_stem":1198,"_extension":18},"/en-us/blog/how-to-agentless-gitops-vars",{"title":1182,"description":1183,"ogTitle":1182,"ogDescription":1183,"noIndex":6,"ogImage":1184,"ogUrl":1185,"ogSiteName":685,"ogType":686,"canonicalUrls":1185,"schema":1186},"Using push-based GitOps with GitLab scripts and variables","Learn how GitLab supports agentless approach for GitOps with scripting and variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682051/Blog/Hero%20Images/agentless-gitops-vars-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-agentless-gitops-vars","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for GitOps with GitLab scripting and variables\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-07-23\",\n      }",{"title":1188,"description":1183,"authors":1189,"heroImage":1184,"date":1190,"body":1191,"category":762,"tags":1192},"How to use a push-based approach for GitOps with GitLab scripting and variables",[738],"2021-07-23","\n\nIn [part one](/blog/how-to-use-agent-based-gitops/) of our GitOps series, we described how to use a pull-based (or agent-based) approach. In this second blog post, we'll dig deep into how to use a push-based approach. The agentless approach may be preferable for situations with non-Kubernetes infrastructure components or when you don't want to install, run, and maintain agents in each infrastructure component for [GitOps](/topics/gitops/). In this post, we will discuss how the scripting capabilities of GitLab can be used in GitOps workflows, and how to use predefined GitLab variables to shape infrastructure components.\n\n## About a push-based or agentless approach\n\nWith the agentless approach, infrastructure expressed and managed as code on GitLab, and updates and drift detection are automated and handled by GitLab without having to install any agents on infrastructure components.\n\n## How to use scripting in your pipelines to shape infrastructure\n\nGitLab allows automation using scripting. 
Whether you're using Docker, Helm, Ansible, or even direct SSH commands, you can use the scripting capabilities of GitLab to create, shape, and modify infrastructure.\n\nIn the example below, the pipeline determines the shape of the infrastructure the application runs on by specifying a Docker image as well as running Docker commands to build and push an application to the GitLab built-in container registry.\n\n![Using Docker in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/0-docker-use-in-pipeline.png){: .shadow.small.center.wrap-text}\nHow to use Docker in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe infrastructure is shaped again at a later stage in the pipeline, but this time by using kubectl and Helm commands:\n\n![Using kubectl in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/1-helm-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use kubectl in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nDepending on the type of infrastructure, other technologies can be used to shape the infrastructure. In the next example, Ansible is used to run a playbook that sets up the infrastructure for an entire lab environment:\n\n![Using Ansible in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/2-ansible-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use Ansible in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe scripting capabilities of GitLab pipelines combined with GitLab's CI/CD capabilities allow users to create GitOps flows to manage Infrastructure as Code (IaC), which delivers more resilient infrastructure and less risk of unscheduled downtime.\n\n## How to use Auto DevOps to modify infrastructure using variables\n\nGitLab also allows users to shape infrastructure by using project or group variables. The number of production pods in a Kubernetes cluster is updated to four in the example below:\n\n![Using variables to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/3-ado-modify-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nHow to use variables to shape infrastructure.\n{: .note.text-center}\n\nThe number of the production pods are changed to four on the next execution of the pipeline:\n\n![Production pods increased via a variable update](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/4-ado-modified-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nProduction pods changed using a variable update.\n{: .note.text-center}\n\nThere are many GitLab [build and deployment variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#build-and-deployment) that can modify infrastructure. [PostgreSQL](https://www.postgresql.org/) is provisioned as a component in infrastructure by default in GitLab to support applications that require a database and also provides [these variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#database) to customize it.\n\n## How GitLab capabilities help agentless infrastructure\n\nThe scripting capabilities of GitLab are a convenient way to shape infrastructure components in GitOps workflows using a push-based approach. This method allows for the easy integration of IaC tools in your GitOps pipelines. 
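\n\nAs a minimal illustration of the kind of scripted job shown in the screenshots above (building an image and pushing it to the built-in container registry), a pipeline job might look roughly like this; the job and stage names are assumptions, not taken from the example project:\n\n```\nbuild_image:\n  stage: build\n  image: docker:latest\n  services:\n    - docker:dind\n  script:\n    # Authenticate to the project's built-in container registry\n    - docker login -u \"$CI_REGISTRY_USER\" -p \"$CI_REGISTRY_PASSWORD\" \"$CI_REGISTRY\"\n    # Build and push an image tagged with the commit SHA\n    - docker build -t \"$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA\" .\n    - docker push \"$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA\"\n```\n\nkubectl, Helm, or Ansible steps slot into later stages of the same pipeline in the same way.\n\n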
If you are doing IaC and GitOps for non-Kubernetes infrastructure components, this is the best approach. GitLab also provides out-of-the-box variables, so users can impact selected infrastructure components. In the final part of this GitOps series, we will discuss an agentless approach using our integration to Terraform as well as examples of GitOps flows for AWS ECS and EC2.\n\nCover image by [Rod Long](https://unsplash.com/@rodlong?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/machu-picchu?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n\n## Read more on GitOps with GitLab: \n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)\n\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n\n- [GitOps with GitLab: Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)\n\n\n\n",[549,743,9],{"slug":1194,"featured":6,"template":699},"how-to-agentless-gitops-vars","content:en-us:blog:how-to-agentless-gitops-vars.yml","How To Agentless Gitops Vars","en-us/blog/how-to-agentless-gitops-vars.yml","en-us/blog/how-to-agentless-gitops-vars",{"_path":1200,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1201,"content":1207,"config":1214,"_id":1216,"_type":13,"title":1217,"_source":15,"_file":1218,"_stem":1219,"_extension":18},"/en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"title":1202,"description":1203,"ogTitle":1202,"ogDescription":1203,"noIndex":6,"ogImage":1204,"ogUrl":1205,"ogSiteName":685,"ogType":686,"canonicalUrls":1205,"schema":1206},"How to prevent broken master with merge trains & pipelines","Do you still run pipelines on source branches? Let's start running them on merge commits!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678366/Blog/Hero%20Images/merge-train.jpg","https://about.gitlab.com/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to avoid broken master with Pipelines for Merged Results and Merge Trains\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Shinya Maeda\"}],\n        \"datePublished\": \"2019-09-11\",\n      }",{"title":1208,"description":1203,"authors":1209,"heroImage":1204,"date":1211,"body":1212,"category":762,"tags":1213},"How to avoid broken master with Pipelines for Merged Results and Merge Trains",[1210],"Shinya Maeda","2019-09-11","\nBroken master. This can happen when CI pipelines run on the master branch (or default branch), but don't\npass all tests. A red cross mark is shown in the project's top page, signalling unstable source\ncode and eroding the trust of users. Broken master could also be a blocker against\na continuous deployment/delivery stream line in which deployment jobs\nare executed after the test stage passed in master pipelines.\n\nAll maintainers want to avoid this critical state,\nbut how can we prevent it?\n\n## Let's look at how master is broken in the first place\n\nLet's say you're one of the maintainers of a project. It's a busy repository with hundreds of merges\nto master every day. A developer assigns a merge request (MR) to you. 
The MR passed all of the tests in the CI pipelines,\nhas been reviewed thoroughly by code reviewers, all open discussions have been resolved, and the MR has been\napproved by the relevant [code owners](https://docs.gitlab.com/ee/user/project/codeowners/).\n\nYou would press the \"Merge\" button without a second thought, but how are you confident that\na pipeline running on master branch after the merge will pass all tests again?\nIf your answer is \"It might break the master branch,\" then\nyou're right. This could happen, for example, if master has advanced by some\nnew commits, and one of them changed a lint rule. The MR in question\nstill contains an invalid coding style, but the latest pipeline on the MR passes,\nbecause the feature branch is based on an old version of master.\n\nEnter two new GitLab features: [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nand [Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html).\nLet me show you how they works and how to enable them.\n\n## How to continually run CI pipelines on the merge commit\n\nLet's break down what went wrong in the scenario above. Even though the pipeline on the\nmerge request passed all the tests, it ran on a source (feature) branch\nwhich could be based on an outdated version of master. In such a case,\nthe result of pipeline is considered as _untrusted_, because there may be a huge difference\nbetween an actual-and-future merge commit and the commit in question.\n\nAs a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), developers can continually rebase their MR\non the latest master, but this is annoying and inefficient, given the speed of\ngrowth of the master branch.\nIt causes a lot of friction between developers and maintainers, slowing down the development cycle.\n\nTo address this problem, we introduced [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nin [GitLab 11.10](/releases/2019/04/22/gitlab-11-10-released/#pipelines-for-merged-results).\n\nSimply put, the main difference between pipelines for merged results and normal pipelines is that\n**pipelines run on merge commits, instead of source branches, before the actual merge happens**.\nThis merge commit is generated from the latest commits of target branch and\nsource branch and written in a temporary place (`refs/merge-requests/:iid/merge`).\nTherefore, we can run a pipeline on it without interfering with master.\n\nHere is a sample workflow with the above scenario:\n\n1. A developer pushes a new commit to a merge request.\n1. GitLab creates a merge commit from the HEAD of the source branch and HEAD of the target branch.\n   This merge commit is written in `refs/merge-requests/:iid/merge` and does not change commit history of master branch.\n1. GitLab creates a pipeline on the merge commit, but this pipeline fails because the latest master changed a lint rule.\n1. A maintainer sees a failed pipeline in the merge request.\n\nAs you can see, the maintainer was able to hold off merging the dangerous MR\nbecause the latest pipeline on the MR didn't pass. 
The feature actually saved\nmaster from a broken state.\n\nAs a bonus, this workflow freeds developers from continual\nrebasing of their merge requests.\nAll they need to do is develop features with [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html).\nGitLab automatically creates an expected merge commit and validates the merge request prior to\nan actual merge.\n\n### How to get started with Pipelines for Merged Results\n\nYou can [start using this feature](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results)\ntoday, with just two steps:\n\n1. Edit the `.gitlab-ci.yml` config file to enable [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/merge_request_pipelines/).\n1. Enable the \"Merge pipelines will try to validate the post-merge result prior to merging\" option at **Settings > General > Merge requests** in your project.\n\n**Note:** If the configurations in your `.gitlab-ci.yml` file are too complex, you might stumble at the first point.\nWe're currently working on [improving the usability of pipelines for merge requests / merge request pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/60085).\nPlease leave your feedback in the issue if that's the case.\n\n## How to avoid race condition of concurrent merges\n\nWith [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html),\nwe can confidently say that MRs are continually tested against the latest master branch.\nHowever, what if multiple MRs have been merged at the same time?\nFor example:\n\n- There are two merge requests: MR-1 and MR-2. The latest pipelines have already passed in both MRs.\n- John (maintainer) and Cathy (maintainer) merge MR-1 and MR-2 at the same time, respectively.\n\nLater on, it turns out that MR-2 contains a coding offence which has just been introduced by MR-1.\nMaintainers hit merge without knowing that, and\nneedless to say, this will result in broken master. How can we handle this race condition properly?\n\nIn [GitLab 12.1](/releases/2019/07/22/gitlab-12-1-released/#parallel-execution-strategy-for-merge-trains), we introduced a new feature,\n[Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/).\nBasically, a Merge Train is a queueing system that allows you to avoid this kind\nof race condition.\nAll you need to do is add merge requests to the merge train, and it\nhandles the rest of the work for you.\nIt creates merge commits according\nto the sequence of merge requests and runs pipelines on the expected merge commits.\nFor example, John and Cathy could have avoided broken master with the following workflow:\n\n1. John and Cathy add MR-1 and MR-2 to their [Merge Train](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/), respectively.\n1. In MR-1, the Merge Train creates an expected merge commit from HEAD of the source branch and HEAD of the target branch.\n   It creates a pipeline on the merge commit.\n1. In MR-2, the Merge Train creates an expected merge commit from HEAD of the source branch and the expected merge commit of MR-1.\n   It creates a pipeline on the merge commit.\n1. The pipeline in MR-1 passes all tests and merged into master branch.\n1. The pipeline in MR-2 fails because it violates a lint check which was changed by MR-1. 
MR-2 is dropped from the Merge Train.\n1. The developer revisits MR-2, fixes the coding offence, and asks Cathy to add it to the Merge Train again.\n\nAs you can see, the Merge Train successfully rejected MR-2 before it could break the master\nbranch. With this workflow, maintainers can feel more confident when they\ndecide to merge something. Also, this doesn't slow down the development lifecycle,\nbecause pipelines are built on an optimistic assumption: in the above case,\nthe pipeline in MR-1 and the pipeline in MR-2 **start almost simultaneously**.\nMR-2 builds a merge commit as if MR-1 has already been merged, so that maintainers\ndon't need to wait a long time for each pipeline to finish. If one of the\npipelines fails, the problematic merge request is dropped from the merge train\nand the train will be reconstructed without it.\n\n### How to get started with Merge Trains\n\nYou can [start using Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train)\ntoday, if you've already enabled [Pipelines for merged results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results). Click the [\"Start/Add merge train\" button](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train) in merge requests.\n\n## A quick demonstration of Merge Trains\n\nHere is a demonstration video that explains the advantages of the Merge Train feature.\nIn this video, we simulate the common problem in a workflow without\nMerge Trains, and then resolve the problem by enabling a Merge Train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## Wrap up\n\nRunning pipelines on expected merge commits allows us to predict what will happen\nin the future and avoid broken master proactively. It soothes the headache of\nrelease managers and gives maintainers and developers more confidence that their code\nis reliable enough to be merged and shipped. 
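\n\nOn the configuration side, the `.gitlab-ci.yml` change from step one above usually amounts to opting each job into merge request pipelines; a minimal sketch (the job name and script are illustrative):\n\n```\ntest:\n  stage: test\n  script:\n    - bundle exec rspec   # illustrative test command\n  only:\n    - merge_requests\n    - master\n```\n\n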
In addition, Merge Trains allow you\nto merge things safely without slowing down the development cycle.\n\nGive this advanced CI/CD feature a try today!\n\nFor more information, check out [the documentation on merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) and [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\n\nCover image by [Dan Roizer](https://unsplash.com/@danny159) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[108,9,1132,869],{"slug":1215,"featured":6,"template":699},"how-to-avoid-broken-master-with-pipelines-for-merge-requests","content:en-us:blog:how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","How To Avoid Broken Master With Pipelines For Merge Requests","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"_path":1221,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1222,"content":1228,"config":1234,"_id":1236,"_type":13,"title":1237,"_source":15,"_file":1238,"_stem":1239,"_extension":18},"/en-us/blog/how-to-setup-gitlab-for-multiple-product-teams",{"title":1223,"description":1224,"ogTitle":1223,"ogDescription":1224,"noIndex":6,"ogImage":1225,"ogUrl":1226,"ogSiteName":685,"ogType":686,"canonicalUrls":1226,"schema":1227},"Managing multiple product categories in GitLab","Exploring issue mangement options for product teams that are all contributing to a single repository.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680898/Blog/Hero%20Images/filing-cabinet.jpg","https://about.gitlab.com/blog/how-to-setup-gitlab-for-multiple-product-teams","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Managing multiple product categories in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabe Weaver\"}],\n        \"datePublished\": \"2019-12-05\",\n      }",{"title":1223,"description":1224,"authors":1229,"heroImage":1225,"date":1231,"body":1232,"category":300,"tags":1233},[1230],"Gabe Weaver","2019-12-05","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2019-12-30.\n{: .alert .alert-info .note}\n\nIn a [recent tweet](https://twitter.com/mrguillaum/status/1202530376415088641), a member of the wider GitLab community asked how to set up GitLab so multiple product teams, each with their own unique workflows, could contribute effectively to a single repository. Let's explore some options.\n\n## GitLab's approach\n\nGiven that GitLab is growing very quickly, we've had to put a lot of thought into organizational structure and mapping to create a well structured workflow within GitLab. The entirety of the product surface area is comprised of [seven distinct product sections](https://handbook.gitlab.com/handbook/product/categories/). Each section consists of several [stages](https://handbook.gitlab.com/handbook/product/categories/#hierarchy), and each stage contains several categories. Our cross-functional product teams are called groups and the groups typically own a handful of categories within a stage. 
For example, [I'm the senior product manager for the Project Management group](/company/team/#gweaver), which is responsible for the [issue tracking](https://docs.gitlab.com/ee/user/project/issues/), [Kanban boards](https://docs.gitlab.com/ee/user/project/issues/index.html#issue-boards), and [time tracking](https://docs.gitlab.com/ee/user/project/time_tracking.html#time-tracking) categories. There are two other groups within the Plan stage.\n\nWhile each product team can technically have their own workflow, we've deliberately tried to standardize across teams. We accomplish this primarily through leveraging group level issue boards and labels. I've created a [demo group](https://gitlab.com/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach) where you can see the our basic approach setup directly within GitLab. The first step to get up and running is to configure the labels. Within our top level group, we've created specific label sets to help us organize our issues, MRs, and issue boards.\n\n### Labels for managing ownership and surface area:\n\n- `stage::name` denotes which issues belong to a given stage. In the demo, I created the `stage::plan` label. This is especially useful for filtering issue boards. By using the [scoped label](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) syntax, we get mutually exclusive labels so an issue or MR can only ever be assigned to a single stage.\n- `group::name` denotes which issues belong to a given cross-functional product team. In the demo, I created `group::project management`, `group::portofolio management`, and `group::certify`, which are the actual groups within GitLab's Plan stage. Again, the use of scoped labels assures mutual exclusivity.\n- `category::name` denotes which issues belong to a given category within a stage. In the demo, I created `category::epics`, `category::issue boards`, `category::issue tracking`, `category::requirements management`, `category::roadmaps`, and `category::service desk`, which are the main categories within the Plan stage.\n\n### Labels for tracking workflow and issue types:\n\n- `type::name` denotes the type of issue. In the demo, I've created `type::debt`, `type::feature`, and `type::defect`. Given an issue can only ever be one type, the use of the scoped label syntax is best.\n- `workflow::state` denotes which workflow state an issue is in. In the demo, I've created `workflow::validation backlog`, `workflow::problem validation`, `workflow::solution validation`, `workflow::planning breakdown`, `workflow::scheduling`, `workflow::ready for dev`, `workflow::in dev`, `workflow::review`, and `workflow::verification`. You can design your workflow however you want, but it is helpful to have a [discussion](https://gitlab.com/gitlab-org/plan/issues/34) with your team to clarify transitions from one workflow state to another.\n\nWith our labels in place, we can now spin up some group level issue boards for the different product teams. The [Project Management team's issue board](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438121?&label_name[]=group%3A%3Aproject%20management&label_name[]=stage%3Aplan) uses a common naming convention and is scoped to only include issues that contain the `group::project management` and `stage::plan` labels. 
The lists are set up according to the `workflow::*` labels we defined earlier. The [Portfolio Management](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438186?&label_name[]=group%3A%3Aportfolio%20management&label_name[]=stage%3Aplan) and [Certify](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438188?&label_name[]=group%3A%3Acertify&label_name[]=stage%3Aplan) teams use a similar structure. The benefit of standardizing on a workflow is that it allows you to easily move from one team to the next and understand what's going on, as well as create rollup issue boards that cut across many teams. In the demo, I created a [stage level issue board](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438190?&label_name[]=stage%3Aplan) for Plan. All of the issues belong to the same project that contains the single repository where all of the product teams contribute.\n\nThe last thing to cover is using milestones to align everyone around a shared release cadence. In the demo, I created [two group milestones](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/milestones) that all of the issues map to. This allows you to see progress at a high level as well as a breakdown of issue status by label type. We're currently working on allowing an issue to be associated to [multiple milestones](https://gitlab.com/gitlab-org/gitlab/issues/5135), enabling milestones to have [types](https://gitlab.com/gitlab-org/gitlab/issues/35290), and adding [burnup charts](https://gitlab.com/gitlab-org/gitlab/issues/6903) to milestones. Once these launch, teams will have even more flexibility to create shared milestones as well as team-specific milestones.\n\n## Other options\n\nThere are a few less desirable ways to setup GitLab to help coordinate multiple product teams:\n\n### The project approach\n\nWith [the project approach](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/project-approach), you can create a project for each individual team and disable the repo. Then create a shared repo where all the merge requests go. Each product team would then have its own project-level milestones, issue boards, and issues, but could still nicely tie into the shared repository. Here's an [example issue and MR](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/project-approach/portfolio-management-team/issues/1) demonstrating how this works. The downside of this approach is that you lose the \"Create Merge Request\" button that issues will have if the issue and repo are within the same project. The \"Create Merge Request\" button allows you to quickly bootstrap your work by [spinning up a branch and WIP MR](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/shared-project/merge_requests/1). 
You also lose the ability to accurately track [cycle analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) out-of-the-box because it is currently based on project level data and not group level data.\n\n### Single issue tracker project\n\nI've seen some teams use a [single issue tracker project](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/multiple-repos) with multiple repositories within the group. They use a similar labeling system described in GitLab's approach, but separate the concerns of code management from issue management. While this works, it has the same downsides as the project approach.\n\n## General best practices & conventions\n\nTo sum things up, there are some generally good practices to follow when setting up your organizational structure within GitLab:\n\n- Centralize label management within the top level group. This helps avoid label duplication and keeps all teams on the same page.\n- Manage issues via issue boards from the group level and keep issues within the same project as their repository to get the most benefit from GitLab's capabilities.\n- Create issue boards for different activities – use milestone lists for a release planning board, use scoped workflow labels for a sprint board, etc.\n- Create shared milestones within the top-level group. This allows them to cascade throughout all sub-groups and projects.\n- If you use epics, a maximum of three layers of nesting is recommended to avoid confusion and unneccessary complexity.\n- Use [GitLab triage](https://gitlab.com/gitlab-org/gitlab-triage) to create policies to help automate issue management.\n\nIf you want to talk shop or bounce around ideas, feel free to reach out via email – gweaver at gitlab dot com.\n\nCover image by [Maksym Kaharlytskyi](https://unsplash.com/@qwitka) on [Unsplash](https://unsplash.com/photos/Q9y3LRuuxmg) {: .note}\n",[9,1132],{"slug":1235,"featured":6,"template":699},"how-to-setup-gitlab-for-multiple-product-teams","content:en-us:blog:how-to-setup-gitlab-for-multiple-product-teams.yml","How To Setup Gitlab For Multiple Product Teams","en-us/blog/how-to-setup-gitlab-for-multiple-product-teams.yml","en-us/blog/how-to-setup-gitlab-for-multiple-product-teams",{"_path":1241,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1242,"content":1248,"config":1253,"_id":1255,"_type":13,"title":1256,"_source":15,"_file":1257,"_stem":1258,"_extension":18},"/en-us/blog/how-to-use-agent-based-gitops",{"title":1243,"description":1244,"ogTitle":1243,"ogDescription":1244,"noIndex":6,"ogImage":1245,"ogUrl":1246,"ogSiteName":685,"ogType":686,"canonicalUrls":1246,"schema":1247},"How to use a pull-based (agent-based) approach for GitOps","Learn how GitLab supports agent-based approach for GitOps","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682037/Blog/Hero%20Images/agent-based-gitops-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-use-agent-based-gitops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a pull-based (agent-based) approach for GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-06-23\",\n      }",{"title":1243,"description":1244,"authors":1249,"heroImage":1245,"date":1250,"body":1251,"category":762,"tags":1252},[738],"2021-06-23","\n\nIn the previous post, titled [3 ways to approach GitOps](https://about.gitlab.com/blog/gitops-done-3-ways/), 
we discussed the many benefits and options that GitLab supports for fulfilling the [GitOps](/topics/gitops/) requirements of customers, whose IT environments are composed of heterogeneous technologies and infrastructures. This post is a 3-part series, in which we delve deeper into these options. In this first part, we cover the pull-based or agent-based approach.\n\n## About a pull-based or agent-based approach\n\nIn this approach, an agent is installed in your infrastructure components to pull changes whenever there is a drift from the desired configuration, which resides in GitLab. Although the infrastructure components could be anything from a physical server or router to a VM or a database, we will focus on a Kubernetes cluster in this section.\n\nIn the following example, the [reconciliation loop](https://about.gitlab.com/solutions/gitops/) is made up of two components: an agent running on the Kubernetes cluster and a server-side service running on the GitLab instance. One of the benefits of this approach is that you don’t have to expose your Kubernetes clusters outside your firewall. Another benefit is its distributed architecture, in that agents running on the infrastructure components are in charge of correcting any drift relieving the server-side from resource consumption. This approach requires the maintenance and installation of agents on all infrastructure components you want to be part of your GitOps flows.\n\n### GitLab Agent for Kubernetes as a pull-based approach\n\n[Introduced](https://about.gitlab.com/releases/2020/09/22/gitlab-13-4-released/#introducing-the-gitlab-kubernetes-agent) as part of GitLab 13.4, the GitLab Agent for Kubernetes runs on your Kubernetes cluster and pulls changes in your infrastructure configuration from GitLab to your cluster keeping your infrastructure configuration from drifting away from its desired state.\n\nGitLab Agent for Kubernetes (the feature) is currently implemented as two components ([architecture doc](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/doc/architecture.md)):\n\n- GitLab Agent for Kubernetes (agentk program): The component that users install into their cluster.\n\n- GitLab Agent for Kubernetes Server (kas program): The server-side counterpart, that runs \"next to GitLab.\"\n\nThe high-level architecture of the GitLab Agent for Kubernetes is depicted below:\n\n![GitLab K8s agent high-level architecture](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/0-K8s-agent-arch.png){: .shadow.small.center.wrap-text}\nGitLab K8s agent high-level architecture.\n{: .note.text-center}\n\nThe **agentk** is installed on your Kubernetes cluster and it is the component that applies updates to the infrastructure. The **kas** is installed on the GitLab instance and it manages the authentication and authorization between **agentk** instances and GitLab, monitors projects for any changes and gathers latest project manifests to send to **agentk** instances.\n\n> **NOTE:** on Gitlab.com, the **kas** is installed and maintained by GitLab. On self-managed instances, the customer needs to install it.\n\nIn the following self-managed instance example, we go through a GitOps flow that leverages the pull-based approach to GitOps.  
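Before the walkthrough, a quick note on the server side: as the note above says, on a self-managed instance the **kas** component has to be installed by the customer. On an Omnibus installation that typically comes down to a couple of commands along the following lines (a minimal sketch; the `gitlab_kas` setting and the `gitlab-kas` service name are assumptions based on Omnibus conventions, so verify them against the documentation for your GitLab version):

```bash
# Sketch only: enable the server-side kas component on an Omnibus self-managed instance.
# Setting and service names are assumptions; check the docs for your GitLab version.
echo "gitlab_kas['enable'] = true" | sudo tee -a /etc/gitlab/gitlab.rb
sudo gitlab-ctl reconfigure
sudo gitlab-ctl status gitlab-kas   # confirm kas is running before installing agentk in the cluster
```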
After the **agentk** component has already been installed on the K8s cluster, the user proceeds to log on to the GitLab instance and creates a project called **gitops-project**:\n\n![Creating the gitops-project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/1-create-gitops-proj.png){: .shadow.medium.center.wrap-text}\nCreating the gitops-project.\n{: .note.text-center}\n\nThe project **gitops-project** will be the one that will be monitored or observed by the **kas** component. Then, under **gitops-project**, the user creates an empty manifest file called **manifest.yaml**. This is the manifest file that will contain the Infrastructure as Code configuration for this project:\n\n![Manifest file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/2-manifest-file-created.png){: .shadow.medium.center.wrap-text}\nManifest file created.\n{: .note.text-center}\n\nNext, the user creates a Kubernetes agent configuration repository project, **kubernetes-agent**, which will contain information pertinent to the **kas** component.\n\n![Creating the kubernetes-agent project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/3-create-K8s-agent-proj.png){: .shadow.medium.center.wrap-text}\nCreating the kubernetes-agent project.\n{: .note.text-center}\n\nWithin the **kubernetes-agent** project, the user creates a subdirectory **.gitlab/agents/agent1**, where **agent1** is the name given to this specific agent:\n\n![Config.yaml file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/4-config-yaml-created.png){: .shadow.medium.center.wrap-text}\nConfig.yaml file created.\n{: .note.text-center}\n\nNotice that in the screenshot above, the project to be observed, **gitops-project**, was created in an earlier step.\n\nThe next step consists of the creation of a GitLab Rails Agent record to associate it with the Kubernetes agent configuration repository project. In the following screenshot, you see the commands that the user enters to first identify the task-runner pod, to log into it, to enter the Rails Console, and finally to create the agent record and a token for it:\n\n![Agent record created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/5-agent-record-created.png){: .shadow.medium.center.wrap-text}\nAgent record created.\n{: .note.text-center}\n\nIn the above screenshot, the last command uses the agent token to create a secret on the K8s cluster for secured communication between the **agentk** and the **kas** components.\n\nThe **agentk** pod creation on the K8s cluster is the next step. For this, the user creates a **resources.yml** file, in which the secured communication protocol between the **agentk** and the **kas** is specified as shown in the following snippet:\n\n![Websockets line](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/6-wss-line-in-resources-yml.png){: .shadow.medium.center.wrap-text}\nWebSockets communication specified in the resources.yml file.\n{: .note.text-center}\n\nIn the above snippet, secured WebSockets protocol is being used. 
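To make those steps easier to reproduce outside of the screenshots, here is a rough sketch of the two pieces created above: the agent's `config.yaml`, which tells **agent1** to observe **gitops-project**, and the Kubernetes secret that stores the token generated in the Rails console. The group path, namespace, and secret name are illustrative placeholders, and the exact `config.yaml` schema should be checked against the agent documentation for your GitLab version:

```bash
# Sketch: contents of .gitlab/agents/agent1/config.yaml in the kubernetes-agent project.
# "my-group" is a placeholder for the real group path that contains gitops-project.
mkdir -p .gitlab/agents/agent1
cat > .gitlab/agents/agent1/config.yaml <<'EOF'
gitops:
  manifest_projects:
    - id: "my-group/gitops-project"
EOF
git add .gitlab/agents/agent1/config.yaml
git commit -m "Configure agent1 to observe gitops-project"
git push

# Sketch: store the token from the Rails console as a cluster secret for agentk.
# The namespace and secret name are assumptions, not required values.
kubectl create namespace gitlab-agent
kubectl create secret generic gitlab-agent-token \
  --from-literal=token='<token-from-rails-console>' \
  -n gitlab-agent
```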
GitLab also supports gRPC.\n\nOnce the **resources.yml** file is updated with the corresponding GitLab instance information, the user proceeds to create the pod:\n\n![Agentk pod created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/7-agentk-created.png){: .shadow.medium.center.wrap-text}\nCreation of the **agentk** pod.\n{: .note.text-center}\n\nIn the screenshot above, you can see the execution of the **kubectl apply** that created the **agentk** pod in the K8s cluster.\n\nNow that the **agentk** and **kas** have been installed and are communicating securely with each other, the user can start performing some GitOps flows. Although the [GitLab Flow](https://about.gitlab.com/topics/version-control/what-is-gitlab-flow/) is the recommended approach for DevOps, it is also applicable to GitOps flows; after all GitOps is all about applying the goodness of DevOps to managing [Infrastructure as Code](/topics/gitops/infrastructure-as-code/).\n\nThis means that the user should create an issue and then a merge request, in which all stakeholders can collaborate towards the resolution of the issue. For the sake of brevity, in this technical blog post, we will skip all these steps and show you how updates to the Infrastructure as Code configuration files are automatically applied to the infrastructure components.\n\nNOTE: Fostering Collaboration is a great benefit of GitOps. For more information on this, check out this short [tech video](https://youtu.be/onFpj_wvbLM).\n\nFor example, the user can start making updates to the **manifest.yaml** file under the **gitops-project**, which is being observed by the kas component. Here you can see the user has pasted content into this file:\n\n![Manifest.yaml file updated](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/8-manifest-yaml-updated.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated.\n{: .note.text-center}\n\nRemember that this file had been created as an empty file. As soon as the user commits the changes displayed above, the **kas** component will detect the changes and communicate these to the **agentk** component, which is running on the K8s cluster. The **agentk** will immediately apply these changes to the infrastructure. In this example, the user has updated the infrastructure configuration file to have 2 instances of an nginx. 
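For readers following along without the screenshot, a manifest along these lines produces that result: a plain nginx Deployment with two replicas. The resource name, labels, and image tag are illustrative choices rather than anything the post prescribes:

```bash
# Sketch: a minimal manifest.yaml committed to gitops-project.
# On commit, kas detects the change and agentk applies it to the cluster.
cat > manifest.yaml <<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.21
          ports:
            - containerPort: 80
EOF
git add manifest.yaml
git commit -m "Run two nginx replicas"
git push
```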
As shown in the screenshot below, the **agentk** has applied these updates by the instantiation of 2 nginx pods in the K8s cluster:\n\n![Two nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/9-two-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates two nginx pods.\n{: .note.text-center}\n\nIf the user were to change the **manifest.yaml** file one more time and increment the replicas of the nginx pod to 3:\n\n![Manifest.yaml file updated with 3 nginx](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/10-manifest-yaml-updated-again.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated with 3 nginx instances.\n{: .note.text-center}\n\nAgain, as soon as the commit takes place, the **kas** component detects the update and communicates this to the **agentk** component, which in turn, spins up a third nginx pod in the K8s cluster:\n\n![Three nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/11-three-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates a third nginx pod.\n{: .note.text-center}\n\nLastly, the user can check the log files of the different components running on GKE, in this example. In the following screenshot, the user can see the **kas** component running on the GitLab instance:\n\n![kas running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/12-kas-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** component running on GKE.\n{: .note.text-center}\n\nAnd then the user can drill down into the log of the **kas** component, and see how it is detecting commits on the project it is observing:\n\n![kas log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/13-kas-log-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** log output on GKE.\n{: .note.text-center}\n\nLikewise, the user can navigate to the **agentk** component of the K8s cluster:\n\n![agentk running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/14-agentk-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** component running on GKE.\n{: .note.text-center}\n\nAnd, again drill down to its log to see, how the **agentk** component runs synchronizations with the **kas** component:\n\n![agentk log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/15-agentk-log-top-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** log output on GKE.\n{: .note.text-center}\n\nIn the following screenshot, the user sees the log statements indicating that the **agentk** is instantiating a third instance of an nginx pod:\n\n![agentk instantiating a third nginx pod](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/16-agentk-log-synced-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** instantiating a third nginx pod.\n{: .note.text-center}\n\nThe above sections described an example of the setup needed to install and run the GitLab Agent for Kubernetes as well as how projects are monitored and synchronized from GitLab to a running K8s cluster.\n\n## Conclusion\n\nWe have gone over the setup and use of the Agent, which is an integral part of our pull-based or agent-based approach to GitOps. 
We also covered a GitOps flow that leveraged this agent-based approach, which is a good choice for Kubernetes shops that need to keep their clusters secured and behind their firewall. This approach comes with its drawbacks in that you need to maintain the agents, which also consume the resources of your infrastructure components. In part two of this series, we will discuss the push-based or agentless approach to GitOps.\n\nCover image by [Vincent Ledvina](https://unsplash.com/@vincentledvina?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/grand-tetons?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[549,869,718,743,9],{"slug":1254,"featured":6,"template":699},"how-to-use-agent-based-gitops","content:en-us:blog:how-to-use-agent-based-gitops.yml","How To Use Agent Based Gitops","en-us/blog/how-to-use-agent-based-gitops.yml","en-us/blog/how-to-use-agent-based-gitops",{"_path":1260,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1261,"content":1267,"config":1274,"_id":1276,"_type":13,"title":1277,"_source":15,"_file":1278,"_stem":1279,"_extension":18},"/en-us/blog/installing-gitlab-on-raspberry-pi-64-bit-os",{"title":1262,"description":1263,"ogTitle":1262,"ogDescription":1263,"noIndex":6,"ogImage":1264,"ogUrl":1265,"ogSiteName":685,"ogType":686,"canonicalUrls":1265,"schema":1266},"Installing GitLab on Raspberry Pi 64-bit OS","A Raspberry Pi enthusiast tries to run GitLab on the new 64-bit OS...and here's what happened.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679433/Blog/Hero%20Images/anto-meneghini-gqytxsrctvw-unsplash.jpg","https://about.gitlab.com/blog/installing-gitlab-on-raspberry-pi-64-bit-os","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Installing GitLab on Raspberry Pi 64-bit OS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-03-14\",\n      }",{"title":1262,"description":1263,"authors":1268,"heroImage":1264,"date":1270,"body":1271,"category":762,"tags":1272},[1269],"Brendan O'Leary","2022-03-14","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes.\nAs with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development and release, and timing of any products, features or functionality remain at the sole discretion of GitLab Inc._\n\nRecently the 64-bit version of [Raspberry Pi OS](https://www.raspberrypi.com/software/) came out of a long-awaited beta, and as a Raspberry Pi enthusiast, I was eager to get my hands on it. While the 64-bit version isn't compatible with all Pi hardware, it's exciting to see the expansion of the ecosystem to allow for better access to RAM and software compatibility as 32-bit support becomes less common.\n\nBut speaking of software support - what about running GitLab on the new 64-bit OS? Did you know that GitLab already has support for [Raspberry Pi OS](/install/#raspberry-pi-os)? 
We even have documentation on [optimizing GitLab on a Raspberry Pi](https://docs.gitlab.com/omnibus/settings/rpi.html) for folks who want to run their self-hosted DevOps platform on simple hardware like the Pi.\n\nNow, the distribution team would want me to point out that official support for ARM64 is still [in the works](https://gitlab.com/groups/gitlab-org/-/epics/2370), but that didn't stop me from at least wanting to try to install GitLab on this exciting new platform. Remember that your mileage may vary - and don't use this in production as it isn't yet officially supported.  \n\nBut that's never stopped me before, so I grabbed my Raspberry Pi 4, a new Micro SD card, and the updated [Raspberry Pi Imager](https://downloads.raspberrypi.org/imager/imager_latest.dmg) and got started.\n\n## Getting Started\n\nThe typical [install for GitLab on the Raspberry Pi](/install/#raspberry-pi-os) assumes you have the standard 32-bit version of `raspbian/buster` that has been standard for some time. So following those steps, I ran into an error with the install script.\n\nWhen running \n\n```bash \nsudo curl -sS https://packages.gitlab.com/install/repositories/gitlab/raspberry-pi2/script.deb.sh | sudo bash\n```\n\nit appeared to work, but if I tried to install GitLab I'd get this error:\n\n```bash\n$ sudo EXTERNAL_URL=\"https://gitpi.boleary.dev\" apt-get install gitlab-ce\n\nReading package lists... Done\nBuilding dependency tree... Done\nReading state information... Done\nPackage gitlab-ce is not available, but is referred to by another package.\nThis may mean that the package is missing, has been obsoleted, or\nis only available from another source\n \nE: Package 'gitlab-ce' has no installation candidate\n```\nThat's related to the fact that this specific version of Raspberry Pi OS isn't supported yet - but since it is a fork of Debian Linux, I was able to work around that.\n\n## Manual Installation\n\nTo get started with a slightly modified installation path, I first got the package details and appropriate prerequisite libraries installed:\n\n```bash\ncurl -s https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.deb.sh | sudo bash\n\nsudo apt-get update\n\nsudo apt-get install debian-archive-keyring\n\nsudo apt-get install curl gnupg apt-transport-https\n\ncurl -L https://packages.gitlab.com/gitlab/gitlab-ce/gpgkey | sudo apt-key add -\n```\n\nThen I created a new sources list to point `apt` to for the installation with `sudo touch /etc/apt/sources.list.d/gitlab_gitlab-ce.list`\n\nNext, I manually added the Debian Buster repositories to that sources list I just created by modifying `/etc/apt/sources.list.d/gitlab_gitlab-ce.list` to add:\n\n```\ndeb https://packages.gitlab.com/gitlab/gitlab-ce/debian/ buster main\ndeb-src https://packages.gitlab.com/gitlab/gitlab-ce/debian/ buster main\n```\n\n## Finishing Up\nFrom there, it was easy to install the 'standard' way, with apt-get handling the rest for me.\n\n```bash\nsudo apt-get update\n\nsudo EXTERNAL_URL=\"http://gitpi.boleary.dev\" apt-get install gitlab-ce\n```\n\n## Next Steps\n\nNow, those who love DNS will notice that I was pointing to a fully qualified domain name, but it points to a private address if you look up that address.\n\n```bash\ndig gitpi.boleary.dev\n; \u003C\u003C>> DiG 9.10.6 \u003C\u003C>> gitpi.boleary.dev\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 512\n;; QUESTION SECTION:\n;gitpi.boleary.dev.\t\tIN\tA\n\n;; ANSWER SECTION:\ngitpi.boleary.dev.\t300\tIN\tA\t100.64.205.40\n```\n\nIsn't 
that interesting?  What does it mean - can I access it from outside my house's network?  And how will I get it to work with HTTPs on that private address?\n\nFor those answers, you'll have to stay tuned to my next article about running GitLab on the Raspberry Pi: Hosting a private GitLab server with Tailscale and LetsEncrypt.\n\nPhoto by \u003Ca href=\"https://unsplash.com/@antomeneghini?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Anto Meneghini\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/raspberries?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n  \n",[9,232,1273],"performance",{"slug":1275,"featured":6,"template":699},"installing-gitlab-on-raspberry-pi-64-bit-os","content:en-us:blog:installing-gitlab-on-raspberry-pi-64-bit-os.yml","Installing Gitlab On Raspberry Pi 64 Bit Os","en-us/blog/installing-gitlab-on-raspberry-pi-64-bit-os.yml","en-us/blog/installing-gitlab-on-raspberry-pi-64-bit-os",{"_path":1281,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1282,"content":1288,"config":1295,"_id":1297,"_type":13,"title":1298,"_source":15,"_file":1299,"_stem":1300,"_extension":18},"/en-us/blog/is-serverless-the-end-of-ops",{"title":1283,"description":1284,"ogTitle":1283,"ogDescription":1284,"noIndex":6,"ogImage":1285,"ogUrl":1286,"ogSiteName":685,"ogType":686,"canonicalUrls":1286,"schema":1287},"Is serverless the end of ops?","What is Serverless architecture, what are the pros and cons of using it and where will it go in the future?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671845/Blog/Hero%20Images/serverless-ops-blog.jpg","https://about.gitlab.com/blog/is-serverless-the-end-of-ops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Is serverless the end of ops?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-09-12\",\n      }",{"title":1283,"description":1284,"authors":1289,"heroImage":1285,"date":1291,"body":1292,"category":1293,"tags":1294},[1290],"Chrissie Buchanan","2019-09-12","\nWe’re not playing tricks when we say [serverless](/topics/serverless/) isn’t actually serverless. It’s not that servers aren’t doing work, it’s just that _your_ servers aren’t necessarily having to do the work. In these exciting times of automation, not having to worry about servers seems pretty appealing.\n\n[Serverless architecture has an annual growth rate of over 700%](https://hackernoon.com/severe-truth-about-serverless-security-and-ways-to-mitigate-major-risks-cd3i3x6f) and shows no signs of slowing down. Its popularity is all due to the operational efficiency it promises. Instead of worrying about infrastructure, you can essentially outsource those responsibilities to your cloud provider. Once you specify the resources your code requires, the cloud provider provisions the servers and deploys. Even better, you only pay for what is used.\n\nThe dream of serverless computing is pretty simple: Developers deploy into infrastructures they don’t have to manage, set up, or maintain. Once they upload a simple cloud function it _just works_. 
Since organizations are only paying for what they use, this system is infinitely scalable, and because this is all managed by a cloud provider, they take over security as well.\n\nWith a serverless architecture carrying all of the ops load, what does that mean for sysadmins?\n\n## Serverless: The end of ops?\n\nServerless hype hasn’t been without skepticism. On the ops side of things, there has been some concern that serverless is trying to force ops out of the picture. A successful [DevOps team structure](/topics/devops/build-a-devops-team/) is all about dev and ops working together but, as we well know, there are some challenges to overcome. For one: dev and ops teams are incentivized by vastly different things. Development wants faster feature delivery, whereas operations wants stability and availability. These two goals contradict each other. With serverless bypassing ops altogether, it unintentionally reinforces the “ops as a barrier” trope.\n\nGetting to the point: No, serverless is not the end of ops as we know it. Ops looks after monitoring, security, networking, support, and the overall stability of a system. Serverless is just one way of managing systems, but it isn’t the only way. [The sysadmin is still happening – you’re just outsourcing it with serverless](https://martinfowler.com/articles/serverless.html), and that’s not necessarily a bad (or good) thing.\n\nEven with so many new technologies and methodologies out there – Kubernetes, serverless, containerization – the basics of computing remain the same. It’s only when we understand the fundamentals and commit to building reliable code that we can make the most of these new platforms.\n\n[In a recent interview with Google Staff Developer Advocate Kelsey Hightower](/blog/kubernetes-chat-with-kelsey-hightower/), one of the biggest challenges he mentions is the “all-or-nothing” approach. “Either I’m all serverless, or I’m all Kubernetes, or I’m all traditional infrastructure. That has never made sense in the history of computing.” Ultimately, you don’t have to choose: Pick the platforms that work best for the job. Monoliths are easy to build and run, and microservices and Kubernetes can help organizations scale faster. Serverless is just another tool that teams can use to keep innovating.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9OHNejqXOoo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo directed and produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n\n## Serverless pros and cons\n\nAs with any architecture, there are going to be some benefits and some disadvantages. It’s important to weigh the pros and cons carefully against your organization’s needs.\n\n### Less operational overhead\n\nThis is frequently listed as one of the biggest advantages of serverless. Security patches, server upgrades, and other maintenance are already taken care of, which can free up resources for more important things.\n\n### Scalability\n\nYou just upload a code/function and your cloud provider handles the rest. [Serverless allows as many functions to be run (in parallel, if necessary) as needed to continually service all incoming requests](https://hackernoon.com/what-is-serverless-architecture-what-are-its-pros-and-cons-cc4b804022e9). Or you can have serverless run an entire application (with frontend, backend, etc.) and still reap the benefits. 
Because you’re not boxed into a certain pricing structure or number of minutes, serverless can be infinitely scalable (in theory).\n\n### Less operating costs\n\nYou’re only using what you need and all costs are purely based on usage. Finances are dynamic, which is more representative of how companies actually operate.\n\nOne example of this concept is comparing a rideshare service to the costs of owning a vehicle. With a car, there are costs you pay regardless of usage (insurance, registration, car payment), there are costs you pay depending on the usage (gas, maintenance), and then there are additional costs tied to unforeseen circumstances (accidents, that pothole again). With a rideshare, you’re just paying to go from point A to point B – all car costs we listed previously are being taken care of by someone else.\n\n### Less control\n\nOften cited as the biggest con, what you gain in reduced operational costs, complexity, and engineering lead time comes with [increased vendor dependencies](https://martinfowler.com/articles/serverless.html) and less oversight. There has to be a lot of trust in the cloud vendor since you’ll be unable to manage the server yourself. Not having control of your system means that if errors happen, you’re reliant on someone else to fix them. In business, no one cares more about your problems than you do.\n\n### Potential security risks\n\nWhile cloud vendors will manage security for you, and are generally well equipped for that task, it’s the architecture of serverless itself that could introduce vulnerabilities into the system. The problem is especially true for serverless applications built on top of microservices, with independent pieces of software interacting through numerous APIs. Gartner warns that [APIs will become the major source of data breaches by 2022](https://www.gartner.com/doc/3834704/build-effective-api-security-strategy).\n\n### Unpredictable costs\n\nHow can we list costs as both a pro and a con? That’s mainly due to the elasticity serverless offers. Since everything is event-triggered, rather than paid up front, elasticity becomes a double-edged sword: You’re not paying for cloud usage you don’t need, but it being so easy to use means you may end up using more.\n\nFor another real-world example of this concept in action, let’s examine ketchup, mainly the introduction of the plastic squeeze bottle.\n\nHeinz ketchup had been served in the iconic glass bottles we all know and love since 1890, but in 1983 the Heinz corporation unveiled the squeezable plastic bottle to consumers. This was heralded as a huge innovation – consumers could squeeze more precisely, the bottles were unbreakable which reduced losses in shipment, and the ergonomic design made it perfect for hands of all sizes. After the introduction of the new squeezable bottle, [ketchup sales went up by 3.7% from the prior year](https://www.npr.org/sections/thesalt/2014/04/29/306911004/whats-the-secret-to-pouring-ketchup-know-your-physics). Why? Now that ketchup could be dispensed more easily, people used a lot more of it. Instead of tapping on a glass bottle hoping for a drop, the ketchup cup runneth over.\n\nWith serverless being so easy to use, it’s best to assume that developers will use it more than you expect.\n\n## Where are we on our serverless journey?\n\nSo much of the literature about serverless comes from the cloud providers themselves, so of course it focuses on the most idealized vision of what serverless can be. 
As a result, those in the ops community felt like they were being forced out, and organizations were too busy paying attention to the benefits to see the potential downsides.\n\nServerless opens up a lot of opportunities in DevOps, and offers a unique solution for many use cases. Does this mean that sysadmins everywhere will soon be out of a job? Probably not. Serverless is just another tool in the toolbox, and at GitLab we’re exploring how to help users leverage Knative and Kubernetes to define and manage serverless functions in GitLab. We’re also looking into how we can be even more multi-faceted. Some users want to work with a Kubernetes cluster, some want to push a serverless function into AWS Lambda. We can already help with monoliths and microservices, and we’re actively working on supporting serverless as well.\n\nInterested in joining the conversation for this category? Please join us in our [public epic](https://gitlab.com/groups/gitlab-org/-/epics/155) where we discuss this topic and we can answer any questions you might have. Everyone can contribute.\n\nPhoto by [Tomasz Frankowski](https://unsplash.com/@sunlifter?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n","insights",[720,869,9],{"slug":1296,"featured":6,"template":699},"is-serverless-the-end-of-ops","content:en-us:blog:is-serverless-the-end-of-ops.yml","Is Serverless The End Of Ops","en-us/blog/is-serverless-the-end-of-ops.yml","en-us/blog/is-serverless-the-end-of-ops",{"_path":1302,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1303,"content":1309,"config":1316,"_id":1318,"_type":13,"title":1319,"_source":15,"_file":1320,"_stem":1321,"_extension":18},"/en-us/blog/lessons-learned-as-data-team-manager",{"title":1304,"description":1305,"ogTitle":1304,"ogDescription":1305,"noIndex":6,"ogImage":1306,"ogUrl":1307,"ogSiteName":685,"ogType":686,"canonicalUrls":1307,"schema":1308},"Lessons learned managing the GitLab Data team","Staff Data Engineer Taylor Murphy shares his lessons and takeways from one year as the Data team manager.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/lessons-learned-as-data-team-manager","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lessons learned managing the GitLab Data team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor Murphy\"}],\n        \"datePublished\": \"2020-02-10\",\n      }",{"title":1304,"description":1305,"authors":1310,"heroImage":1306,"date":1312,"body":1313,"category":802,"tags":1314},[1311],"Taylor Murphy","2020-02-10","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-02-19.\n{: .alert .alert-info .note}\n\nFrom April 2018 to May 2019 I was the manager of the Data team for GitLab. I took this role after my manager left, when I started reporting directly to the CFO as a Data Engineer.\n\nI remember saying to him \"this doesn't seem like the right level of abstraction for you,\" and proposed I step up to become the manager. I also said I didn't want to do this for a long period of time, since I intentionally came to GitLab to move from a manager role to an individual contributor role and focus on Data Engineering.\n\nWhat follows are a few lessons I learned (and relearned!) 
in my one-year stint as the manager of the Data team. Eventually, I aim to become a manager again and I hope to remember these lessons and learn even more.\n\n### Plan for growth\n\nWhile I was Data team manager, GitLab grew in size by ~300%. Having only worked previously at established companies and at a very small startup, I was not prepared for this level of growth and the strain it would put on our resources.\n\nI recently surveyed colleagues of mine in the data community and discovered that, as a percentage of headcount, most Data teams are anywhere from 2-8%.\n\nThis means a 200-person company should have at least four people, and realistically around 10 people, focused on data. This includes analysts, engineers, scientists, and managers.\nIn April of 2018, we were at \u003C 1% (1/300) and would continue to be \u003C 1% throughout 2018.\n\nAs the company grew, I did not wholly understand how the business was planning to grow and how the Data team would scale to meet the data needs of the organization. This lack of strategic thinking led to a situation where I felt blindsided and overwhelmed by the number of requests for data and analytics.\n\nEven with the addition of the excellent people I was able to hire, I wasn't doing as good a job as I needed to help my team truly succeed.\n\nLesson: Understand the trajectory of the company, the workload you have and expect to have, pick a gearing ratio for headcount, stick to your hiring targets, and think about [team structure](https://blog.getdbt.com/data-team-structure-examples/).\n{: .alert .alert-gitlab-purple}\n\n### Individual contributor or manager? Pick one\n\nBy the end of 2018, the Data team was a three-person team: one data analyst, one data engineer, and me.\nThankfully, the three of us were, I'm not ashamed to say, excellent at our jobs and performed at a level beyond what you would expect three FTEs to handle.\n\nBut even we have limits and couldn't do it all.\n\nDue to the volume of work we were trying to accomplish, it was critical that I take on analyst and engineering work as well.\n\nThis created a situation where I was splitting my brain and my attention trying to do too many things at once.\n\nSome days would be all manager work, and I would make zero progress on issues assigned to me. Others would be IC work, and I would fall behind on managerial tasks. The worst days were when I would try to do both, and everything would suffer.\n\nAs time went on this split brain effect would become worse – the signs of burnout were starting to ramp up rapidly.\n\nI was able to hire more people, which put more demand on the manager side of me, yet the volume of work was increasing while I was still the primary contributor and maintainer of our codebase. By the end, I didn't feel like I was a good manager, and I felt like my technical skills were rapidly atrophying.\n\nLesson: If you're a manager, be a manager. Yes, you'll have to pick up some work, especially at a startup, but figure out your exit plan so you can pass that work to your team who will be much better at accomplishing it than you.\n{: .alert .alert-gitlab-purple}\n\n### Hire awesome people\n\nThis should go without saying, but hire excellent people and your life will be better. 
My first four hires for the Data team (two in 2018, two in early 2019) have blown me away with their skill, curiosity, tenacity, and intelligence.\n\nI learned from my previous job and past bosses the value in finding great people and the force multiplier they can have on the work you're trying to accomplish.\n\nLesson: Continue hiring great people! But think about how to scale it.\n{: .alert .alert-gitlab-purple}\n\n### Invest in process\n\nThis lesson I learned from [Emilie Schario](https://gitlab.com/emilie), the first Data Analyst I hired. She taught me to think about how and where we'll need processes as the company scaled, so we could remain [efficient](https://handbook.gitlab.com/handbook/values/#efficiency).\n\nWe, of course, used GitLab for managing our code, and we had built-in merge request workflows, but she took the time to think about the messy \"people stuff\" surrounding the technology.\n\nA short list of artifacts she created:\n\n- [Onboarding issue for new analysts](https://gitlab.com/gitlab-data/analytics/-/blob/master/.gitlab/issue_templates/Data%20Onboarding.md)\n- [Onboarding script to get new analysts up and running quickly](https://gitlab.com/gitlab-data/analytics/-/blob/master/admin/onboarding_script.sh)\n- [Merge request templates, so everyone is working off the same checklist](https://gitlab.com/gitlab-data/analytics/-/blob/master/.gitlab/merge_request_templates/dbt%20Model%20Changes.md)\n\nAnd many more I'm sure I'm forgetting.\n\nWhile she wasn't the manager, she had the experience and understood the parts of working at a company that can slow down team members, and she worked to automate as much of it as possible. I've heard from many people outside the company how much they appreciate our documentation in general and our onboarding process in particular.\n\nThat is a testament to thinking about scale and having the empathy to continually step into the shoes of a GitLab learner and to see things from an outsider's perspective.\n\nAs Data teams have grown and evolved they've also [become more technical](https://blog.getdbt.com/what-is-an-analytics-engineer/). These mean it's important to invest in the technical process as well – this means you should have [version control](/topics/version-control/), change control (merge requests), automated testing, and [documentation on everything you're doing](https://dbt.gitlabdata.com/).\n\nCertain tools make implementing technical processes better and easier, which I'll highlight in the next section.\n\nLesson: (1) Think about process deeply and document everything. (2) Maintain the mind of a learner and continually think about what day one with GitLab is like for new people. (3) Invest in process, documentation, and testing - they are gifts you give your future self.\n{: .alert .alert-gitlab-purple}\n\n### Pick excellent tools\n\nAlong with process, picking the right tools can be a force multiplier for team productivity. When the Data team started, we were using PostgreSQL as our data warehouse. Postgres is not column-oriented, and at a certain point it doesn't make sense to use it as an analytics database.\n\nWe went with Postgres anyway because we believe in using a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and it aligns with our value of [iteration](https://handbook.gitlab.com/handbook/values/#iteration). For the volume of data we were throwing at it, Postgres did admirably. 
We used the CloudSQL-hosted version which enabled us to do cool, programmatic things with GitLab CI (I'll save that for another post).\n\nOnce we outgrew Postgres we decided to move to Snowflake.\n\nOf course, being GitLab, we use GitLab the product for anything and everything, which saved us much of the stress around picking tools. It has all the things you want from a coding perspective, and it has enough of the things you need to be productive as a manager. No need for Trello, Jira, and a dozen other tools.\n\nBy far though, the best tool for the Data team's productivity is [dbt (data build tool)](https://www.getdbt.com/). I could talk forever about how great dbt is, but suffice to say that we would not be where we are today and we would not have been able to support the organization this well with such a small crew, were it not for dbt and the great community behind it.\n\nLesson: Find the best tools you can for your team. Use dbt!\n{: .alert .alert-gitlab-purple}\n\n### Handling under-performers is a challenge\n\nUp until 2019, I'd never hired somebody who didn't perform well in their job, aside from a few interns.\nI'd like to think most of this was my ability to find good people, but it was probably luck, if I'm being honest.\n\nLast year challenged me with two under-performers on the team that I now realize I could have supported better. Having those difficult conversations with people was hard when I wasn't 100% in the manager brain space. My advice is to pay attention to those first few weeks of productivity, and if you find there are gaps, either in skills or motivation, do whatever you can to call out the gaps in a friendly and productive way, and then give your people every opportunity to become better.\n\nLesson: Be a good manger, notice things early, and help your team proactively.\n{: .alert .alert-gitlab-purple}\n\n### So many meetings\n\nGitLab has a great culture around meetings.\n\nThey [always start on time](https://handbook.gitlab.com/handbook/communication/#video-calls), there [must be an agenda for every meeting](https://handbook.gitlab.com/handbook/communication/#scheduling-meetings), and [people aren't afraid to end meetings early if everything on the agenda is done](https://handbook.gitlab.com/handbook/values/#be-respectful-of-others-time).\nEven with this rigor and discipline you will find yourself on the [\"Manager's Schedule\"](http://www.paulgraham.com/makersschedule.html) and will be in a lot of meetings. But that's okay! That's part of your job.\n\nI will always argue that you should still try to reduce the time you're in meetings, but if you're in a meeting, do your best to ensure your team *isn't* also in a meeting, if at all possible. Meetings are terrible for makers (i.e., your direct reports). Shield your team from them as much as possible.\n\nLesson: Meetings are a part of the job, reduce them as much as you can, and protect your team from unnecessary meetings.\n{: .alert .alert-gitlab-purple}\n\n### You need executive buy-in and representation\n\nPart of the reason I was excited to join GitLab was because the C-Suite clearly supported having a Data team in the organization.\nThe CEO and CFO understood the value a Data team could bring, even if the specifics and execution were blurry.\nThis is important! 
You will be in a tough spot if your company has nobody on the executive team that understands the value that good descriptive and predictive analytics can provide.\nData literacy is a cultural attribute, and it's [near impossible to grow literacy](https://towardsdatascience.com/is-your-company-too-dumb-to-be-data-driven-696932d597c3) in an organization if the CEO isn't driving it in some way.\n\nAt a certain scale though, you need Data leadership beyond a team manager.\nYou absolutely need someone at the Director level and up that can advocate and champion Data literacy and fluency across the functional areas of the organization.\nManagers can't be expected to spend much time on this since there is so much daily work to be done.\n\nLesson: Be wary of organizations that don't have C-Suite buy-in around the data function.\nAdvocate for a Director-level and up position that can be the cheerleader for Data across the organization.\n{: .alert .alert-gitlab-purple}\n\n### Plan to spend some money\n\nExecutive level buy-in for a Data team is important because of this fact: Starting a Data team can be expensive. To be effective, you'll need to hire several people or empower your single data lead to purchase some third-party software.\n\nOut of the gate you'll need an extract and load tool like Stitch or Fivetran, you'll need a data warehouse (e.g., Snowflake, BigQuery, Redshift), you'll need compute to run transform jobs, and you'll want a BI tool.\nThere are free tools that can sustain you for a while, but plan to invest some money up front if you're in it for the long haul.\n\nLesson: Long term success will require investment. You can start cheaply, but to scale requires resources.\n{: .alert .alert-gitlab-purple}\n\n### Don't reinvent the wheel\n\nEspecially for things like extracting data from tools such as Salesforce, Zendesk, or Zuora, please, please, PLEASE don't write your own scripts to do this. Just pay a company to do it for you. You'll waste a ton of time doing something that doesn't deliver business value and will probably come back to bite you in the end.\n\nYou should spend most of your time [delivering value for the business](https://blog.getdbt.com/the-startup-founder-s-guide-to-analytics/) in the form of automated reporting and generating insights, not writing a Salesforce to Snowflake extractor for the thousandth time.\n\nLesson: Pay for Stitch or Fivetran for common data extractions.\n{: .alert .alert-gitlab-purple}\n\n### Manager is a different career\n\nDon't think about becoming a manager as an extension of your individual contributor career. It *is* a different career path and your IC-skills will certainly help you be a better manager. However, management is its own set of skills and choosing to go into this field puts you on a different career path. It's not necessarily better depending on how you define success.\n\nGo into management with open eyes and a full understanding that you are switching tracks and not \"moving ahead\". It isn't permanent, though, and can be reversed if you choose.\n\nLesson: Don't assume the move to manager is the default for an IC. Think deeply about your [career](https://www.locallyoptimistic.com/post/career-ladders-part-1/). Read [about the Engineer/Manager Pendulum](https://charity.wtf/2017/05/11/the-engineer-manager-pendulum/).\n{: .alert .alert-gitlab-purple}\n\n### It's okay to be a little selfish\n\nOne area I've struggled with for a while is making the effort to be a little selfish. 
I can have a people-pleaser mentality which, when applied to the business of a startup, can be useful: Startups need people that are willing to do what it takes to make the company successful (within reason!). But once the company is in a growth stage or beyond, that mentality is a recipe for burnout.\n\nAt my previous company, we were less than 30 people. Having the attitude of trying to do and learn as much as possible was a good strategy for me. I learned a ton, was given a bunch of responsibility, and helped the business grow. That strategy worked for me at GitLab for a while too. After some time passed, it was clear I couldn't keep up with everything, and my sanity would start to suffer without a fix.\n\nBeing selfish in this case meant I had to be okay with wanting to take a \"step back\" from the manager role to the IC role (Spoiler: it's not a step back! See the previous point).\n\nI had to admit to myself that I wanted to focus on programming more and that continuing down the manager track wasn't currently right for me.\n\nIt felt selfish because it was hard in the moment to see that what the business needed was somebody who *wanted* to be the manager. It didn't need me to continue in the role just because I happened to currently be in the role.\n\nWhile there were short-term ramifications for the team because of my move to an IC role, I know that I'm healthier for it, and we now have two excellent managers who are leading the team further than I could have.\n\nLesson: (1) It's a *good* thing to prioritize and be selfish about your mental health. (2) It's okay to say \"No, I can't do this anymore\". (3) Companies need people who want to be in their jobs - performance is better and people are happier.\n{: .alert .alert-gitlab-purple}\n\n### Fin\n\nMy hope is that these lessons are valuable to you, and are applicable in your own life and career. 
I would love to hear from you if you disagree with any of these, or if you have your own stories and lessons to share about your career in data; please reach out on [Twitter](https://twitter.com/tayloramurphy), via email (tmurphy at gitlab.com), or in an [issue in our main project](https://gitlab.com/gitlab-data/analytics/).\n\nThank you for reading and thank you to GitLab for enabling my growth as a Data professional.\n\n*Special thanks to [Emilie Schario](https://gitlab.com/emilie) for her review on multiple drafts of this post.*\n",[9,1315,1091],"design",{"slug":1317,"featured":6,"template":699},"lessons-learned-as-data-team-manager","content:en-us:blog:lessons-learned-as-data-team-manager.yml","Lessons Learned As Data Team Manager","en-us/blog/lessons-learned-as-data-team-manager.yml","en-us/blog/lessons-learned-as-data-team-manager",{"_path":1323,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1324,"content":1330,"config":1336,"_id":1338,"_type":13,"title":1339,"_source":15,"_file":1340,"_stem":1341,"_extension":18},"/en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"title":1325,"description":1326,"ogTitle":1325,"ogDescription":1326,"noIndex":6,"ogImage":1327,"ogUrl":1328,"ogSiteName":685,"ogType":686,"canonicalUrls":1328,"schema":1329},"How Comet can streamline machine learning on The GitLab DevOps Platform","Here's a step-by-step look at how to bring ML into software development using Comet on GitLab's DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669991/Blog/Hero%20Images/ways-to-encourage-collaboration.jpg","https://about.gitlab.com/blog/machine-learning-on-the-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Comet can streamline machine learning on The GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-11-08\",\n      }",{"title":1325,"description":1326,"authors":1331,"heroImage":1327,"date":1332,"body":1333,"category":762,"tags":1334},[971],"2021-11-08","\n\nBuilding machine learning-powered applications comes with numerous challenges. When we talk about these challenges, there is a tendency to overly focus on problems related to the quality of a model’s predictions—things like data drift, changes in model architectures, or inference latency. \n\nWhile these are all problems worthy of deep consideration, an often overlooked challenge in [ML development](/topics/devops/the-role-of-ai-in-devops/) is the process of integrating a model into an existing software application.  \n\nIf you’re tasked with adding an ML feature to a product, you will almost certainly run into an existing codebase that must play nicely with your model. This is, to put it mildly, not an easy task. \n\nML is a highly iterative discipline. Teams often make many changes to their codebase and pipelines in the process of developing a model. Coupling an ML codebase to an application’s dependencies, unit tests, and CI/CD pipelines will significantly reduce the velocity with which ML teams can deliver on a solution, since each change would require running these downstream dependencies before a merge can be approved.  
\n\nIn this post, we’re going to demonstrate how you can use [Comet](https://www.comet.ml/site/) with [GitLab’s DevOps platform](/solutions/devops-platform/) to streamline the workflow for your ML and software engineering teams, allowing them to collaborate without getting in each other's way.      \n\n## The challenge for ML teams working with application teams\n\nLet’s say your team is working on improving a feature engineering pipeline. You will likely have to test many combinations of features with some baseline model for the task to see which combinations make an impact on model performance.     \n \nIt is hard to know beforehand which features might be significant, so having to run multiple experiments is inevitable. If your ML code is a part of your application codebase, this would mean having to run your application’s CI/CD pipeline for every feature combination you might be trying. \n\nThis will certainly frustrate your Engineering and DevOps teams, since you would be unnecessarily tying up system resources, given that software engineering teams do not need to run their pipelines with the same frequency as ML teams do.  \n\nThe other issue is that despite having to run numerous experiments, only a single set of outputs from these experiments will make it to your production application. Therefore, the rest of the assets produced through these experiments are not relevant to your application code.     \n\nKeeping these two codebases separated will make life a lot easier for everyone – but it also introduces the problem of syncing the latest model between two codebases.     \n\n## Use The GitLab DevOps Platform and Comet for your model development process\n\nWith The GitLab DevOps platform and Comet, we can keep the workflows between ML and engineering teams separated, while enabling cross-team collaboration by preserving the visibility and auditability of the entire model development process across teams.     \n\nWe will use two separate projects to demonstrate this process. One project will contain our application code for a handwritten digit recognizer, while the other will contain all the code relevant to training and evaluating our model.  \n\nWe will adopt a process where discussions, code reviews, and model performance metrics get automatically published and tracked within The GitLab DevOps Platform, increasing the velocity and opportunity for collaboration between data scientists and software engineers for machine learning workflows.\n\n## Project setup\n\nOur project consists of two projects: [comet-model-trainer](https://gitlab.com/tech-marketing/devops-platform/comet-model-trainer) and [ml-ui](https://gitlab.com/tech-marketing/devops-platform/canara-review-apps-testing). \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/cometmodeltrainer.png){: .shadow}\n\nThe **comet-model-trainer** repository contains scripts to train and evaluate a model on the MNIST dataset. We have set up The GitLab DevOps Platform in a way that runs the training and evaluation Pipeline whenever a new merge request is opened with the necessary changes.\n\nThe **ml-ui** repository contains the necessary code to build the frontend of our ML application.\n\nSince the code is integrated with Comet, your ML team can easily track the source code, hyperparameters, metrics, and other details related to the development of the model.  
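As a side note for anyone reproducing the training step outside of CI, the Comet Python SDK picks up its credentials from environment variables, so a local dry run looks roughly like the sketch below. `COMET_API_KEY` is Comet's standard credential variable, `COMET_WORKSPACE` and `COMET_PROJECT_NAME` are the same variables the pipeline configures later in this post, and the script name is an illustrative assumption:

```bash
# Sketch: run the training script locally with Comet experiment tracking enabled.
# The script name is illustrative; the COMET_* values are placeholders you would
# set to your own workspace and project, matching the CI/CD variables described below.
export COMET_API_KEY="<your-comet-api-key>"
export COMET_WORKSPACE="<your-workspace>"
export COMET_PROJECT_NAME="comet-model-trainer"
pip install -r requirements.txt
python build_neural_network.py
```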
\n\nOnce the training and evaluation steps are completed, we can use Comet to fetch summary metrics from the project as well as metrics from the Candidate model and display them within the merge request; This will allow the ML team to easily review the changes to the model. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/buildmodelgraph.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/summarymetrics.png){: .shadow}\n\nIn our case, the average accuracy of the models in the project is 97%. Our Candidate model achieved an accuracy of 99%, so it looks like it is a good fit to promote to production. The metrics displayed here are completely configurable and can be changed as necessary.        \n\nWhen the merge request is approved, the deployment pipeline is triggered and the model is pushed to Comet’s Model Registry. The Model Registry versions each model and links it back to the Comet Experiment that produced it.  \n![Alt text for your image](https://about.gitlab.com/images/blogimages/OpenComet_SparkVideo.gif){: .shadow}    \n\nOnce the model is pushed to the Model Registry, it is available to the application code. When the application team wishes to deploy this new version of the model to their app, they simply have to trigger their specific deployment pipeline.     \n\n## Running the pipeline\n\n### Pipeline outline\n\nWe will run the process outlined below every time a team member creates a merge request to change code in the `build-neural-network`script:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/modelapprove.png){: .shadow}\n\nNow, let’s take a look at the yaml config used to define our CI/CD pipelines depicted in the previous diagram:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/workflowsbranch.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/script.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/registermodel.png){: .shadow}\n\nLet's break down the CI/CD pipeline by describing the gitlab-ci.yml file so you can use it and customize it to your needs.\n\nWe start by instructing our GitLab runners to utilize Python:3.8 to run the jobs specified in the pipeline: \n\n`Image: python:3.8`\n\nThen, we define the job where we want to build and train the neural network:\n\n`Build-neural-network`\n\n### Build-neural-network \n\nIn this step, we start by creating a folder where we will store the artifacts generated by this job, install dependencies using the requirements.txt file, and finally  execute the corresponding Python script that will be in charge of training the neural network. The training runs in the GitLab runner using the Python image defined above, along with its dependencies.\n\nOnce the `build-neural-network` job has finalized successfully, we move to the next job: `write-report-mr`\n\nHere, we use another image created by DVC that will allow us to publish a report right in the merge request opened by the contributor who changed code in the neural network script. In this way, we’ve brought software development workflows to the development of ML applications. 
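Because the pipeline definition itself only appears above as screenshots, here is an abbreviated sketch of what a `.gitlab-ci.yml` with this shape could look like. The `python:3.8` image, the job names, and the `only: main` and manual-deploy behavior come from the post; the script file names, the report step's implementation, and the downstream project path are assumptions added purely for illustration:

```bash
# Sketch: scaffold an abbreviated .gitlab-ci.yml with the jobs described in this post.
# Script names, the report step's command, and the downstream project path are illustrative.
cat > .gitlab-ci.yml <<'EOF'
image: python:3.8

stages: [train, report, register, deploy]

build-neural-network:
  stage: train
  script:
    - mkdir -p artifacts
    - pip install -r requirements.txt
    - python build_neural_network.py      # trains the model and logs metrics to Comet
  artifacts:
    paths:
      - artifacts/

write-report-mr:
  stage: report
  script:
    - python write_report.py              # publishes Comet metrics back to the merge request

register-model:
  stage: register
  only:
    - main                                # runs only after the merge request is approved and merged
  script:
    - python register_model.py            # pushes the accepted model to Comet's Model Registry

deploy-ml-ui:
  stage: deploy
  when: manual                            # a team member triggers the UI deployment explicitly
  only:
    - main
  trigger:
    project: my-group/ml-ui               # placeholder path for the downstream UI project
EOF
```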
With the report provided by this job, code and model review can be executed within the merge request view, enabling teams to collaborate not only around the code but also around model performance.\n\nFrom the merge request page, we get access to loss curves and other relevant performance metrics from the model we are training, along with a link to the Comet Experiment UI, where richer details are provided to evaluate model performance. These details include interactive charts for model metrics, the model hyperparameters, and confusion matrices of the test set performance, to name a few.\n\n![Manually triggering the deployment from the pipeline view](https://about.gitlab.com/images/blogimages/manualDeploy_SparkVideo.gif){: .shadow}\n\nWhen the team is done with the code and model review, the merge request gets approved, and the script that generated the model is merged into the main codebase, along with its respective commit and the CI pipeline associated with it. This takes us to the next job:\n\n### register-model\n\nThis job uses an integration between GitLab and Comet to upload the reviewed and accepted version of the model to the Comet Model Registry. If you recall, the Model Registry is where models intended for production can be logged and versioned. To run the commands that register the model, we need to set up these variables:\n\n- `COMET_WORKSPACE`\n- `COMET_PROJECT_NAME`\n\nTo do that, follow the steps described [here](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-an-instance).\n\nIt is worth noting that the `register-model` job only runs when the merge request gets reviewed and approved, and this behavior comes from setting `only: main` at the end of the job definition.\n\nFinally, we want a team member to keep control over the actual deployment, so we define a manual job: `deploy-ml-ui`.\n\n![The manual deploy-ml-ui job](https://about.gitlab.com/images/blogimages/deployuiml.png){: .shadow}\n\nWhen triggered, this job will import the model from Comet’s Model Registry and automatically create the necessary containers to build the user interface and deploy it to a Kubernetes cluster.\n\n![The downstream pipeline triggered by deploy-ml-ui](https://about.gitlab.com/images/blogimages/downstream.png){: .shadow}\n\nThis job triggers a downstream pipeline, which means that the UI for this MNIST application resides in a different project. This keeps the codebases for the UI and model training separated, but integrated and connected at the moment of deploying the model to a production environment.\n\n![The multi-project pipeline view](https://about.gitlab.com/images/blogimages/multipipeline_SparkVideo.gif){: .shadow}\n\n## Key takeaways\n\nIn this post, we addressed some of the challenges faced by ML and software teams when it comes to collaborating on delivering ML-powered applications. Some of these challenges include:\n\n* The discrepancy in the frequency with which each of these teams needs to iterate on their codebases and CI/CD pipelines.\n\n* The fact that only a single set of experiment assets from an ML experimentation pipeline is relevant to the application.\n\n* The challenge of syncing a model or other experiment assets across independent codebases.\n\nUsing the GitLab DevOps Platform and Comet, we can start bridging the gap between ML and software engineering teams over the course of a project.
\n\nBy having model performance metrics adopted into software development workflows like the one we saw in the issue and merge request, we can keep track of the code changes, discussions, experiments, and models created in the process. All the operations executed by the team are recorded, can be audited, are end-to end-traceable, and (most importantly) reproducible. \n\nWatch a demo of this process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/W_DsNl5aAVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_About Comet:_\nComet is an MLOps Platform that is designed to help data scientists and teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! \n\nLearn more about Comet [here](https://www.comet.ml/site/) and get started for free!\n\n\n\n",[743,9,232,1335],"AI/ML",{"slug":1337,"featured":6,"template":699},"machine-learning-on-the-gitlab-devops-platform","content:en-us:blog:machine-learning-on-the-gitlab-devops-platform.yml","Machine Learning On The Gitlab Devops Platform","en-us/blog/machine-learning-on-the-gitlab-devops-platform.yml","en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"_path":1343,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1344,"content":1350,"config":1357,"_id":1359,"_type":13,"title":1360,"_source":15,"_file":1361,"_stem":1362,"_extension":18},"/en-us/blog/meltano-functional-group-update-post",{"title":1345,"description":1346,"ogTitle":1345,"ogDescription":1346,"noIndex":6,"ogImage":1347,"ogUrl":1348,"ogSiteName":685,"ogType":686,"canonicalUrls":1348,"schema":1349},"New Meltano personas, priorities, and updates from the team","There's a lot going on — here are some of the highlights on user research, dogfooding Meltano, embedding engineers, and hiring!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678847/Blog/Hero%20Images/meltano-fgu.jpg","https://about.gitlab.com/blog/meltano-functional-group-update-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New Meltano personas, priorities, and updates from the team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2018-10-08\",\n      }",{"title":1345,"description":1346,"authors":1351,"heroImage":1347,"date":1353,"body":1354,"category":762,"tags":1355},[1352],"Jacob Schatz","2018-10-08","\nJacob Schatz here, Staff Engineer for [Meltano](https://gitlab.com/meltano)! We've been heads down working on improving Meltano, and figured it was time for an update. We've had some great conversations that have helped us identify two general personas. Our team is also growing, and we're ready for frontend contributions, but more on that later.\n\nWe've been conducting interviews to zero in on what our users will want, what they're currently doing, and what tools they're using. Over the course of those conversations, we saw two main scenarios emerge. People either wanted a command line interface (CLI) or a graphical user interface (GUI). The GUIs that exist are painful to use, and not very intuitive. In both scenarios, people we spoke with are frustrated. 
This goes back to the original reason [we decided to create Meltano](/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you/) — our data team members were relying on frustrating and expensive toolsets with poor UIs.\n\n### What are the Meltano personas?\n\nOur conversations revealed two general types of users:\n* Users who have engineers on staff\n* Users who do not have engineers on staff, or their engineers do not have bandwidth to help them\n\nThe Data team at GitLab, for example, has data engineers on staff who are willing, able, and happy to write Python. We won't be able to write every extractor and loader, so our users can follow our [specifications](https://gitlab.com/meltano/specifications), which are based off of the [Singer specifications](https://github.com/singer-io/getting-started). We want to make that as easy as possible, so Meltano can be the glue between all these different pieces.\n\nFor the other teams who don’t have the technical resources, we want to make it as if they had engineers on staff. Ideally, they'll just need to click a couple of buttons, run extract, load and transform with the extractors and loaders that we already have. Hopefully in the future the community can contribute more to these types of different extractors and loaders.\n\nYou can check out our updated [readme](https://gitlab.com/meltano/meltano/blob/master/README.md) with more info about Meltano and our personas. We're working iteratively, so if you have a different setup or scenario to share, we want to hear from you about your experience! Get in touch with us and tell us about your struggles or successes with your data team.\n\n### What’s next?\n\nWe're focused on our own CLI and GUI, and continuing to build more extractors and loaders (or [\"taps and targets\"](https://www.singer.io/)). We will be the glue that ties everything together. While current Singer taps and targets support extracting and loading, we'll be supporting much more, like removal of PII. Our CLI will support all of this from one configuration. We also want the CLI to have a really nice user experience, so I'm working with GitLab UX to help make it happen.\n\nAs always, we’re looking for contributors! In the [Dashboard project](https://gitlab.com/meltano/dashboard) you’ll see the Chart.js library that I’m building to make really nice dashboards for Meltano. Although we've had a ton of great Python contributions, we haven’t had as many contributors to the frontend, so we’d love your help there.\n\n### In other news\nThere's a lot going on, here are some of the highlights!\n\n#### Dogfooding\nIn my experience, unless one experiences the direct results of the code they write, and feel the pain their users feel when they hit a bug, one might not correctly solve the problem. Currently, we fulfill the data team's requests, but if something doesn't work they merely report back to us, without us experiencing the pain ourselves. We're changing how we work in order to imprint the idea that if something is broken, it's the Meltano team's responsibility. We’re all investigating every single pipeline failure, regardless of whose “fault” it is, because these suggest that it may be a poor user experience.\n\n#### Embedded engineers\nIn order to dogfood better, we've taken a data engineer from the data team, and an engineer from the Meltano team. They split their work 50/50 so each does half of their usual work and half of each other's work. 
It's already made a huge difference by giving us more eyes and ears on lots of issues, and allowing the engineers to approach problems from a different angle. Another added benefit is that every Meltano engineer gets direct exposure and experience from the data team, to make them better data scientists as well product engineers.\n\nThat's all for now, get in touch with us in our [issue tracker](https://gitlab.com/groups/meltano/-/boards), and tweet us [@meltanodata](https://twitter.com/meltanodata)!\n\nCover [image](https://unsplash.com/photos/2FPjlAyMQTA) by [John Schnobrich](https://unsplash.com/@johnschno) on Unsplash\n{: .note}\n\n[Emily von Hoffmann](https://about.gitlab.com/company/team/#emvonhoffmann) contributed to this post.\n{: .note}\n",[9,869,1356,696,1132],"git",{"slug":1358,"featured":6,"template":699},"meltano-functional-group-update-post","content:en-us:blog:meltano-functional-group-update-post.yml","Meltano Functional Group Update Post","en-us/blog/meltano-functional-group-update-post.yml","en-us/blog/meltano-functional-group-update-post",{"_path":1364,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1365,"content":1370,"config":1378,"_id":1380,"_type":13,"title":1381,"_source":15,"_file":1382,"_stem":1383,"_extension":18},"/en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"title":1366,"description":1367,"ogTitle":1366,"ogDescription":1367,"noIndex":6,"ogImage":859,"ogUrl":1368,"ogSiteName":685,"ogType":686,"canonicalUrls":1368,"schema":1369},"How to set up multi-account AWS SAM deployments with GitLab CI/CD","Our guest author, an AWS Serverless hero, shares how to automate SAM deployments using GitLab CI/CD.","https://about.gitlab.com/blog/multi-account-aws-sam-deployments-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to set up multi-account AWS SAM deployments with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Forrest Brazeal\"}],\n        \"datePublished\": \"2019-02-04\",\n      }",{"title":1366,"description":1367,"authors":1371,"heroImage":859,"date":1373,"body":1374,"category":762,"tags":1375},[1372],"Forrest Brazeal","2019-02-04","I've been working with [serverless](/topics/serverless/) applications in AWS\nfor about three years – that makes me an old salt in serverless terms! 
So I\nknow that deploying and maintaining a serverless app can be tricky; the\ntooling often has critical gaps.\n\n\nAWS's [SAM (Serverless Application\nModel)](https://aws.amazon.com/serverless/sam/) is an open source framework\nthat makes it easier to define AWS resources – such as Lambda functions, API\nGateway APIs and DynamoDB tables – commonly used in serverless applications.\nOnce you lay out your app in a SAM template, the next thing you need is a\nconsistent, repeatable way to get that template off your laptop and deployed\nin the cloud.\n\n\nYou need CI/CD.\n\n\nI've used several different [CI/CD systems](/topics/ci-cd/) to automate SAM\ndeployments, and I always look for the following features:\n\n\n- A single deployment pipeline that can build once and securely deploy to\nmultiple AWS accounts (dev, staging, prod).\n\n- Dynamic feature branch deployments, so serverless devs can collaborate in\nthe cloud without stepping on each other.\n\n- Automated cleanup of feature deployments.\n\n- Review of our SAM application directly integrated with the CI/CD tool's\nuser interface.\n\n- Manual confirmation before code is released into production.\n\n\nIn this post, we'll find out how [GitLab\nCI](/solutions/continuous-integration/) can check these boxes on its way to\ndelivering effective CI/CD for AWS SAM. You can follow along using [the\nofficial example code, available\nhere](https://gitlab.com/gitlab-examples/aws-sam).\n\n\n## Multi-account AWS deployments\n\n\nWe'll want to set up our deployment pipeline across multiple AWS accounts,\nbecause accounts are the only true security boundary in AWS. We don't want\nto run any risk of deploying prod data in dev, or vice versa. Our\nmulti-account setup will look something like this:\n\n\nAny time we work with multiple AWS accounts, we need cross-account [IAM\nroles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) in\norder to authorize deployments. We'll handle this task through the following\nsteps. (All referenced scripts are available in the [example\nrepo](https://gitlab.com/gitlab-examples/aws-sam))\n\n\n### 1\\. Establish three AWS accounts for development, staging, and\nproduction deployments\n\n\nYou can use existing AWS accounts if you have them, or [provision new ones\nunder an AWS\nOrganization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html).\n\n\n### 2\\. Set up GitLab IAM roles in each account\n\n\nRun the following AWS CLI call with admin credentials in each of the three\naccounts:\n\n\n```\n\naws cloudformation deploy --stack-name GitLabCIRoles --template-file\nsetup-templates/roles.yml --capabilities CAPABILITY_NAMED_IAM\n--parameter-overrides CIAccountID=\"\u003CAWS Account ID where your GitLab CI/CD\nrunner lives>\" CIAccountSTSCondition=\"\u003CThe aws:userid for the IAM principal\nused by the Gitlab runner>\"\n  ```\n\nReplace `CIAccountID` and `CIAccountSTSCondition` as indicated with values\nfrom the AWS account where your GitLab CI/CD runner exists. (Need help\nfinding the `aws:userid` for your runner’s IAM principal? Check out [this\nguide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable).)\n\n\nThis CloudFormation template defines two roles: `SharedServiceRole` and\n`SharedDeploymentRole`. The `SharedServiceRole` is assumed by the GitLab\nCI/CD runner when calling the AWS CloudFormation service. This role trusts\nthe GitLab CI/CD runner's role. 
It has permissions to call the\nCloudFormation service, pass a role via IAM, and access S3 and CloudFront:\nnothing else. This role is not privileged enough to do arbitrary AWS\ndeployments on its own.\n\n\nThe `SharedDeploymentRole`, on the other hand, has full administrative\naccess to perform any AWS action. A such, it cannot be assumed directly by\nthe GitLab CI/CD runner. Instead, this role must be \"passed\" to\nCloudFormation using the service's `RoleArn` parameter. The CloudFormation\nservice trusts the `SharedDeploymentRole` and can use it to deploy whatever\nresources are needed as part of the pipeline.\n\n\n### 3\\. Create an S3 bucket for CI artifacts\n\n\nGrab the AWS account ID for each of your development, staging, and\nproduction accounts, then deploy this CloudFormation template **in the\naccount where your GitLab CI/CD Runner exists**:\n\n\n`aws cloudformation deploy --stack-name GitLabCIBucket --template-file\nsetup-templates/ci-bucket.yml --parameter-overrides DevAwsAccountId=\"\u003CAWS\nAccount ID for dev>\" StagingAwsAccountId=\"\u003CAWS Account ID for staging>\"\nProdAwsAccountId=\"\u003CAWS Account ID for prod>\" ArtifactBucketName=\"\u003CA unique\nname for your bucket>\"`\n\n\nThis CloudFormation template creates a centralized S3 bucket which holds the\nartifacts created during your pipeline run. Artifacts are created once for\neach branch push and reused between staging and production. The bucket\npolicy allows the development, test, and production accounts to reference\nthe same artifacts when deploying CloudFormation stacks -- checking off our\n\"build once, deploy many\" requirement.\n\n\n### 4\\. Assume the `SharedServiceRole` before making any cross-account AWS\ncalls\n\nWe have provided the script `assume-role.sh`, which will assume the provided\nrole and export temporary AWS credentials to the current shell. It is\nsourced in the various `.gitlab-ci.yml` build scripts.\n\n\n## Single deployment pipeline\n\n\nThat brings us to the `.gitlab-ci.yml` file you can see at the root of our\nexample repository. GitLab CI/CD is smart enough to dynamically create and\nexecute the pipeline based on that template when we push code to GitLab. The\nfile has a number of variables at the top that you can tweak based on your\nenvironment specifics.\n\n\n### Stages\n\n\nOur Gitlab CI/CD pipeline contains seven possible stages, defined as\nfollows:\n\n\n![Multi-account AWS SAM deployment model with GitLab\nCI](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-model.png){:\n.shadow.medium.center}\n\n\n```yaml\n\nstages:\n - test\n - build-dev\n - deploy-dev\n - build-staging\n - deploy-staging\n - create-change-prod\n - execute-change-prod\n```\n\n\n![Deployment lifecycle\nstages](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-lifecycle-stages.png){:\n.shadow.medium.center}\n\n\n\"Stages\" are used as a control flow mechanism when building the pipeline.\nMultiple build jobs within a stage will run in parallel, but all jobs in a\ngiven stage must complete before any jobs belonging to the next stage in the\nlist can be executed.\n\n\nAlthough seven stages are defined here, only certain ones will execute,\ndepending on what kind of Git action triggered our pipeline. 
We effectively\nhave three stages to any deployment: a \"test\" phase where we run unit tests\nand dependency scans against our code, a \"build\" phase that packages our SAM\ntemplate, and a \"deploy\" phase split into two parts: creating a\n[CloudFormation change\nset](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)\nand then executing that change set in the target environment.\n\n\n#### Test\n\n\nOur `.gitlab-ci.yml` file currently runs two types of tests: unit tests\nagainst our code, and dependency scans against our third-party Python\npackages.\n\n\n##### Unit tests\n\n\nUnit tests run on every branch pushed to the remote repository. This\nbehavior is defined by the `only: branches` property in the job shown below:\n\n\n```yaml\n\ntest:unit:\n stage: test\n only:\n   - branches\n script: |\n   if test -f requirements.txt; then\n       pip install -r requirements.txt\n   fi\n   python -m pytest --ignore=functions/\n```\n\n\nEvery GitLab CI/CD job runs a script. Here, we install any dependencies,\nthen execute Python unit tests.\n\n\n##### Dependency scans\n\n\n[Dependency\nscans](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/),\nwhich can take a few minutes, run only on code pushed to the master branch;\nit would be counterproductive for developers to wait on them every time they\nwant to test code.\n\n\nThese scans use a hardcoded, standard Docker image to mount the code and run\n\"Docker in Docker\" checks against a database of known package\nvulnerabilities. If a vulnerability is found, the pipeline will log the\nerror without stopping the build (that's what the `allow-failure: true`\nproperty does).\n\n\n#### Build\n\n\nThe build stage turns our SAM template into CloudFormation and turns our\nPython code into a valid AWS Lambda deployment package. For example, here's\nthe `build:dev` job:\n\n\n```yaml\n\nbuild:dev:\n stage: build-dev\n \u003C\u003C: *build_script\n variables:\n   \u003C\u003C: *dev_variables\n artifacts:\n   paths:\n     - deployment.yml\n   expire_in: 1 week\n only:\n   - branches\n except:\n   - master\n```\n\n\nWhat's going on here? Note first the combination of `only` and `except`\nproperties to ensure that our development builds happen only on pushes to\nbranches that aren't `master`. We're referring to `dev_variables`, the set\nof development-specific variables defined at the top of `.gitlab-ci.yml`.\nAnd we're running a script, pointed to by `build_script`, which packages our\nSAM template and code for deployment using the `aws cloudformation package`\nCLI call.\n\n\nThe artifact `deployment.yml` is the CloudFormation template output by our\npackage command. It has all the implicit SAM magic expanded into\nCloudFormation resources. By managing it as an artifact, we can pass it\nalong to further steps in the build pipeline, even though it isn't committed\nto our repository.\n\n\n#### Deploy\n\nOur deployments use AWS CloudFormation to deploy the packaged application in\na target AWS environment.\n\n\nIn development and staging environments, we use the `aws cloudformation\ndeploy` command to create a change set and immediately execute it. 
In\nproduction, we put a manual \"wait\" in the pipeline at this point so you have\nthe opportunity to review the change set before moving onto the \"Execute\"\nstep, which actually calls `aws cloudformation execute-changeset` to update\nthe underlying stack.\n\n\nOur deployment jobs use a helper script, committed to the top level of the\nexample repository, called `cfn-wait.sh`. This script is needed because the\n`aws cloudformation` commands don't wait for results; they report success as\nsoon as the stack operation starts. To properly record the deployment\nresults in our job, we need a script that polls the CloudFormation service\nand throws an error if the deployment or update fails.\n\n\n## Dynamic feature branch deployments and Review Apps\n\n\n![Dynamic feature branch deployments and Review\nApps](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/dynamic-feature-branch-deployments.png){:\n.shadow.medium.center}\n\n\nWhen a non-master branch is pushed to GitLab, our pipeline runs tests,\nbuilds the [updated source\ncode](/solutions/source-code-management/), and deploys and/or\nupdates the changed CloudFormation resources in the development AWS account.\nWhen the branch is merged into master, or if someone clicks the \"Stop\"\nbutton next to the branch's environment in GitLab CI, the CloudFormation\nstack will be torn down automatically.\n\n\nIt is perfectly possible, and indeed desirable, to have multiple development\nfeature branches simultaneously deployed as live environments for more\nefficient parallel feature development and QA. The serverless model makes\nthis a cost-effective strategy for collaborating in the cloud.\n\n\nIf we are dynamically deploying our application on every branch push, we\nmight like to view it as part of our interaction with the GitLab console\n(such as during a code review). GitLab supports this with a nifty feature\ncalled [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/). Review\nApps allow you to specify an \"environment\" as part of a deployment job, as\nseen in our `deploy:dev` job below:\n\n\n```yaml\n\ndeploy:dev:\n \u003C\u003C: *deploy_script\n stage: deploy-dev\n dependencies:\n   - build:dev\n variables:\n   \u003C\u003C: *dev_variables\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   url: https://${CI_COMMIT_REF_NAME}.${DEV_HOSTED_ZONE_NAME}/services\n   on_stop: stop:dev\n only:\n   - branches\n except:\n   - master\n```\n\n\nThe link specified in the `url` field of the `environment` property will be\naccessible in the `Environments` section of GitLab CI/CD or on any merge\nrequest of the associated branch. (In the case of the sample SAM application\nprovided with our example, since we don't have a front end to view, the link\njust takes you to a GET request for the `/services` API endpoint and should\ndisplay some raw JSON in your browser.)\n\n\n![Link to live\nenvironment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/link-live-environment.png){:\n.shadow.medium.center}\n\n\nThe `on_stop` property specifies what happens when you \"shut down\" the\nenvironment in GitLab CI. This can be done manually or by deleting the\nassociated branch. 
In the case above, we have stopped behavior for dev\nenvironments linked to a separate job called `stop:dev`:\n\n\n```yaml\n\nstop:dev:\n stage: deploy-dev\n variables:\n   GIT_STRATEGY: none\n   \u003C\u003C: *dev_variables\n \u003C\u003C: *shutdown_script\n when: manual\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   action: stop\n only:\n   - branches\n except:\n   - master\n```\n\n\nThis job launches the `shutdown_script` script, which calls `aws\ncloudformation teardown` to clean up the SAM deployment.\n\n\nFor safety's sake, there is no automated teardown of staging or production\nenvironments.\n\n\n## Production releases\n\n\n![Production\nreleases](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/production-releases.png){:\n.shadow.medium.center}\n\n\nWhen a change is merged into the master branch, the code is built, tested\n(including dependency scans) and deployed to the staging environment. This\nis a separate, stable environment that developers, QA, and others can use to\nverify changes before attempting to deploy in production.\n\n\n![Staging\nenvironment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/staging-environment.png){:\n.shadow.medium.center}\n\n\nAfter deploying code to the staging environment, the pipeline will create a\nchange set for the production stack, and then pause for a manual\nintervention. A human user must click a button in the Gitlab CI/CD\n\"Environments\" view to execute the final change set.\n\n\n## Now what?\n\n\nStep back and take a deep breath – that was a lot of information! Let's not\nlose sight of what we've done here: we've defined a secure, multi-account\nAWS deployment pipeline in our GitLab repo, integrated tests, builds and\ndeployments, and successfully rolled a SAM-defined serverless app to the\ncloud. Not bad for a few lines of config!\n\n\nThe next step is to try this on your own. If you'd like to start with our\nsample \"AWS News\" application, you can simply run `sam init --location\ngit+https://gitlab.com/gitlab-examples/aws-sam` to download the project on\nyour local machine. The AWS News app contains a stripped-down,\nsingle-account version of the `gitlab-ci.yml` file discussed in this post,\nso you can try out deployments with minimal setup needed.\n\n\n## Further reading\n\n\nWe have barely scratched the surface of GitLab CI/CD and AWS SAM in this\npost. Here are some interesting readings if you would like to take your work\nto the next level:\n\n\n### SAM\n\n\n- [Implementing safe AWS Lambda deployments with AWS SAM and\nCodeDeploy](https://aws.amazon.com/blogs/compute/implementing-safe-aws-lambda-deployments-with-aws-codedeploy/)\n\n- [Running and debugging serverless applications locally using the AWS SAM\nCLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-test-and-debug.html)\n\n\n### GitLab CI\n\n\n- [Setting up a GitLab Runner on\nEC2](https://hackernoon.com/configuring-gitlab-ci-on-aws-ec2-using-docker-7c359d513a46)\n\n- [Scheduled\npipelines](https://docs.gitlab.com/ee/ci/pipelines/schedules.html)\n\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\n\nPlease [let me know](https://twitter.com/forrestbrazeal) if you have further\nquestions!\n\n\n### About the guest author\n\n\nForrest Brazeal is an [AWS Serverless\nHero](https://aws.amazon.com/developer/community/heroes/forrest-brazeal/).\nHe currently works as a senior cloud architect at\n[Trek10](https://trek10.com), an AWS Advanced Consulting Partner. 
You can\n[read more about Trek10's GitLab journey here](/customers/trek10/).\n",[108,9,232,721,1376,1377],"production","user stories",{"slug":1379,"featured":6,"template":699},"multi-account-aws-sam-deployments-with-gitlab-ci","content:en-us:blog:multi-account-aws-sam-deployments-with-gitlab-ci.yml","Multi Account Aws Sam Deployments With Gitlab Ci","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci.yml","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"_path":1385,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1386,"content":1392,"config":1398,"_id":1400,"_type":13,"title":1401,"_source":15,"_file":1402,"_stem":1403,"_extension":18},"/en-us/blog/protecting-manual-jobs",{"title":1387,"description":1388,"ogTitle":1387,"ogDescription":1388,"noIndex":6,"ogImage":1389,"ogUrl":1390,"ogSiteName":685,"ogType":686,"canonicalUrls":1390,"schema":1391},"How to limit access to manual pipeline gates and deployments using GitLab","Let's look at how to use protected environments to set up access controls for production deployments and manual gates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681105/Blog/Hero%20Images/protect_manual_jobs.jpg","https://about.gitlab.com/blog/protecting-manual-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to limit access to manual pipeline gates and deployments using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thao Yeager\"}],\n        \"datePublished\": \"2020-02-20\",\n      }",{"title":1387,"description":1388,"authors":1393,"heroImage":1389,"date":1395,"body":1396,"category":762,"tags":1397},[1394],"Thao Yeager","2020-02-20","This blog post was originally published on the GitLab Unfiltered\nblog. It was reviewed and republished on\n2020-02-21.\n\n{: .alert .alert-info .note}\n\n\nIn our world of automation, why would anyone want to do something manually?\nManual has become almost synonymous with inefficient. But, when it comes to\nCI/CD pipelines, a properly configured **manual** job can be a powerful way\nto control deployments and satisfy compliance requirements. Let’s take a\nlook at how manual jobs can be defined to serve two important use cases:\nControlling who can deploy, and setting up manual gates.\n\n\n## Limit access to deploy to an environment\n\n\nDeploying to production is a mission-critical occurence that should be\nprotected. Projects with a Kubernetes cluster could benefit from moving to a\ncontinuous deployment (CD) model in which a [branch or merge request, once\nmerged, is auto-deployed to\nproduction](https://docs.gitlab.com/ee/topics/autodevops/index.html#auto-deploy),\nand the absence of human intervention avoids mishaps. But for projects not\nyet configured for CD, let's consider this use case: Imagine a pipeline with\na manual job to deploy to prod, which can be triggered by any user with\naccess to push code. The risk of a unplanned, unintended production\ndeployment is very real.\n\n\nFortunately, it’s possible to use [protected\nenvironments](https://docs.gitlab.com/ee/ci/environments/protected_environments/)\nto prevent just anyone from deploying to production. When [configuring a\nprotected\nenvironment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environments),\nyou can define the roles, groups, or users to whom deploy access is granted.\nThe protected environment can then be defined in a manual job to deploy\nwhich limits who can run it. 
The configuration could look something like\nthis:\n\n\n```yaml\n\ndeploy_prod:\n  stage: deploy\n  script:\n    - echo \"Deploy to production server\"\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  only:\n    - master\n```\n\n\nIn the example above, the keyword `environment` is used to reference a\nprotected environment (as [configured in project\nsettings](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environment))\nwith a list of users who can run the job, in this case deploy to the named\nenvironment. Users without access see a disabled **play** button and are\nunable to execute the job.\n\n\n## Add an approval step\n\n\nCompliance rules may specify that approval is required for certain\nactivities in a workflow, even if they aren't technically a deployment step\nthemselves. In this use case, an approval step can also be added in the\npipeline that prompts an authorized user to take action to continue. This\ncan be achieved by structuring your pipeline with an \"approve\" stage\ncontaining a special manual job – for example, the YAML to insert an\napproval stage before deployment could look like this:\n\n\n```yaml\n\nstages:\n  - build\n  - approve\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - echo Hello!\n\napprove:\n  stage: approve\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  allow_failure: false\n  only:\n    - master\n\ndeploy:\n  stage: deploy\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  only:\n    - master\n```\n\n\nIn the YAML above, `allow_failure: false` [defines the manual job as\n\"blocking\"](https://docs.gitlab.com/ee/ci/yaml/#whenmanual), which will\ncause the pipeline to pause until an authorized user gives \"approval\" by\nclicking on the **play** button to resume. Only the users part of that\nenvironment list will be able to perform this action. In this scenario, the\nUI view of the pipeline in the example CI configuration above would look\nlike this:\n\n\n![Pipeline view of approval stage manual\njob](https://about.gitlab.com/images/blogimages/manual_job_approve_stage_ui.png){:\n.shadow}\n\n\n## Summary\n\n\nAs illustrated in the YAML examples and image above, manual jobs defined\nwith protected environments and blocking attributes are effective tools for\nhandling compliance needs as well as for ensuring there are proper controls\nover production deployments.\n\n\nTell us how using protected environments with manual jobs has secured your\ndeployments or whether blocking manual jobs helps you meet compliance and\nauditing. 
[Create an issue in the GitLab project issue\ntracker](https://gitlab.com/gitlab-org/gitlab/issues/new) to share your\nfeedback with us.\n\n\nCover image by [Diane Walton](https://unsplash.com/photos/BNnzmBmnPg4) on\n[Unsplash](https://unsplash.com)\n\n{: .note}\n",[108,9,1132,869,743],{"slug":1399,"featured":6,"template":699},"protecting-manual-jobs","content:en-us:blog:protecting-manual-jobs.yml","Protecting Manual Jobs","en-us/blog/protecting-manual-jobs.yml","en-us/blog/protecting-manual-jobs",{"_path":1405,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1406,"content":1412,"config":1418,"_id":1420,"_type":13,"title":1421,"_source":15,"_file":1422,"_stem":1423,"_extension":18},"/en-us/blog/start-using-pages-quickly",{"title":1407,"description":1408,"ogTitle":1407,"ogDescription":1408,"noIndex":6,"ogImage":1409,"ogUrl":1410,"ogSiteName":685,"ogType":686,"canonicalUrls":1410,"schema":1411},"New: How to get up and running quickly using GitLab Pages templates","We're introducing bundled GitLab Pages templates, so let's take a look at how easy it really is now to get up and running with a new site.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679908/Blog/Hero%20Images/pages-templates-cover-image.jpg","https://about.gitlab.com/blog/start-using-pages-quickly","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New: How to get up and running quickly using GitLab Pages templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-02-20\",\n      }",{"title":1407,"description":1408,"authors":1413,"heroImage":1409,"date":1415,"body":1416,"category":762,"tags":1417},[1414],"Jason Yavorska","2019-02-20","\n\nHello everyone, my name is Jason Yavorska and I'm the product manager for the [Release stage](/stages-devops-lifecycle/release/) here at GitLab, which includes GitLab Pages. In our [GitLab 11.8 release (March 2019) we're introducing](https://gitlab.com/gitlab-org/gitlab-ce/issues/47857) a quick way to select from our most popular [Pages templates](https://gitlab.com/pages?sort=stars_desc) directly from the new project setup screen. If you use GitLab.com, you can take advantage of this feature already! It looks a bit like this:\n\n![Pages Templates View](https://about.gitlab.com/images/blogimages/pages-templates-view.png){: .shadow.medium.center}\n\nNow, instead of having to fork an existing template, you can simply select one of the bundled ones and get going right away. If you're interested in one of the other templates, you can still create those in the old way – check out the [existing documentation on how to fork a template](https://docs.gitlab.com/ee/user/project/pages/index.html#fork-a-project-to-get-started-from).\n\nIn this article I'm going to show you just how effortless all of this can be. But first:\n\n## My experience contributing GitLab Pages templates\n\nFirst, though, I'd be remiss if I didn't mention that I contributed this change myself (with the help of a few key supporting players, of course.) Now, you may be wondering: I thought you were a product manager at GitLab? Not a developer? Well, that's absolutely true, but I am a hobbyist programmer on the side. 
I've contributed a small change here or there on my own time, but this was the largest, most complex thing that I've ever contributed myself.\n\nI always find in these situations that contributing is in some ways easier than you expect, and in some ways more challenging. Getting the code working was actually surprisingly straightforward: I was able to get our GDK ([GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/README.md)) up and running with minimal hassle, and then was able to iterate quickly until I found a working solution. Most of my challenges ended up being around getting the change through our review process and into the release. There's a lot you have to learn there, and I think it just takes some time and practice in order to have it all click. What was truly amazing, though, was all the friendly people who jumped in to help me along the way. I learned so much and am so proud of how everything came together in the end.\n\nIf you're considering making your first contribution, feel free to reach out to me on Twitter ([@j4yav](https://twitter.com/j4yav)) and I'll be happy to help guide you in the right direction. Contributing to open source is a great feeling, big or small, and if you haven't tried it before you should really give it a go.\n\n## Now let's set up a site!\n\nWith that out of the way, let's see this in action to appreciate just how painless it really is to set up a new site in GitLab pages now.\n\nThe video below walks through the steps, with full instructions underneath.\n\n Note that if you're using a private on-premise version of GitLab, be sure to check with your administrator to ensure that Pages is enabled. You may need to adjust some of the URLs in the setup below depending on your site configuration.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://youtube.com/embed/C2E1M-4Jvd0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### 1. Create the new project\n\nFor this example, we'll use the [Hugo](https://gohugo.io/) template, our most popular one. Simply go to the GitLab home page, and select \"New Project\" from the top right. Click on \"Create from template,\" click on the Hugo template, and then click on \"Use template.\" Give it a name like `namespace.gitlab.io`, where `namespace` is your `username` or `groupname`.\n\n### 2. Run your first pipeline\n\nWe need to make one quick edit, which will naturally kick off a pipeline and deploy our site for the first time. What we need to do is edit our `config.toml` to have the same URL that we set up in the project name. To do this we will go to Repository → Files, click on the `config.toml` file, and then click on \"Edit\" in the toolbar. All we need to do is change the `baseurl = \"https://pages.gitlab.io/hugo/\"` line to `baseurl = \"https://namespace.gitlab.io/\"` (again, replacing `namespace` with your `username` or `groupname`).\n\nCommit your changes, then head over to CI/CD → Pipelines and look for the new pipeline that's running. You can click on the status to see the build log, or just wait for it to finish – you might be surprised at how fast this is! Once the pipeline passes, we're good to go. It may take a minute or two for everything to work through replication, but once it does, you can see your new site at `https://namespace.gitlab.io/`, beautiful template included, just waiting for you to customize further.\n\n### 3. 
Where to go next\n\nThere's a lot of basic configuration for your site in the `config.toml`, check that out and see what you might like to modify. The about page is in `/content/page/about.md`, and you can see example posts for your blog in `/content/post` – feel free to delete these when you're done with them. Since these are written in [markdown](https://docs.gitlab.com/ee/user/markdown.html) they are a piece of cake to edit or add new ones. Getting started with Hugo is a bit out of scope for this post, but I assure you it's quite straightforward. You can check out the [Hugo getting started pages](https://gohugo.io/getting-started/) for more ideas on what you can do. Be sure also to check out [Hugo themes](https://gohugo.io/themes/) if you're looking for inspiration.\n\nHopefully this was helpful in getting you started. Good luck with your new site!\n\nCover image by José Alejandro Cuffia(https://unsplash.com/@alecuffia) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,869,696,721],{"slug":1419,"featured":6,"template":699},"start-using-pages-quickly","content:en-us:blog:start-using-pages-quickly.yml","Start Using Pages Quickly","en-us/blog/start-using-pages-quickly.yml","en-us/blog/start-using-pages-quickly",{"_path":1425,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1426,"content":1432,"config":1439,"_id":1441,"_type":13,"title":1442,"_source":15,"_file":1443,"_stem":1444,"_extension":18},"/en-us/blog/three-levels-data-analysis",{"title":1427,"description":1428,"ogTitle":1427,"ogDescription":1428,"noIndex":6,"ogImage":1429,"ogUrl":1430,"ogSiteName":685,"ogType":686,"canonicalUrls":1430,"schema":1431},"A framework for sssessing data organization maturity","GitLab Data Engineer Emilie Schario lays out a framework for data analysis that can help an organization understand the maturity of their data team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666603/Blog/Hero%20Images/book.jpg","https://about.gitlab.com/blog/three-levels-data-analysis","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The 3 Levels of Data Analysis- A Framework for Assessing Data Organization Maturity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilie Schario\"}],\n        \"datePublished\": \"2019-11-04\",\n      }",{"title":1433,"description":1428,"authors":1434,"heroImage":1429,"date":1436,"body":1437,"category":693,"tags":1438},"The 3 Levels of Data Analysis- A Framework for Assessing Data Organization Maturity",[1435],"Emilie Schario","2019-11-04","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIf I had a nickel for every time I saw that [Data Science Hierarchy of Needs](https://hackernoon.com/the-ai-hierarchy-of-needs-18f111fcc007) visual in a presentation at a conference, I'd be a gazillionaire (technical term).\nThe pyramid, a nod to Maslow's Hierarchy of Needs, lays out that data science, in it's Machine Learning or Artificial Intelligence forms, has a series of \"needs\" or requirements that must be met in order to *actually* output AI.\n\nThis visual is great, but I've spent the last couple years working in data, and this visual doesn't capture what I do.\nML and AI are attractive subjects to talk about, but the reality for most organizations is that their data teams are incredibly immature and spend the bulk of their time working on analyses.\nData organization maturity is made up of many factors;\nit's not just the details of your machine learning models, the pedigree of 
your team members, or the headcount of your function.\nThe maturity of your data organization is not something that can be solved by throwing people at the problem.\n\n## A mature data organization, first and foremost, is a mature analytics organization.\n\nSo, how do you know if you are a mature analytics organization?\n\nThere are three tiers of data analysis: reporting, insights, and prediction.\nAs an organization matures in their data analyses, they move through the tiers.\nThis data analysis framework is not focused on all the things your data team will produce, nor does the framework apply to anything outside of data analysis.\nThings like recommendation engines and predictive analytics are not data analyses;\nthey're a different application of data entirely.\n\nA mature analytics organization is one part of a data function, but it is foundational to a mature data function.\nSpending an investment in *doing analytics right* will pay dividends to your data function down the road.\n\n## The Briefest History of Data\n\nBefore evaluating where data analysis is today, it's important to consider how data got here.\nOnce upon a time, data was impossible to get.\n\nYears ago, SQL was the prerequisite for answering data questions, and those lucky enough to work in an organization that maintained a centralized data warehouse still had to navigate delicate databases easily waylaid by a bad query.\n\nData analysts were the gatekeepers of data.\nAnything that was needed— from a pretty chart for a stakeholder meeting or a spreadsheet produced so business or financial analysts could further dig into the data – had to go through a data analyst.\n\nIn a world where, [knowledge workers are making thousands of decisions a day](https://www.psychologytoday.com/us/blog/stretching-theory/201809/how-many-decisions-do-we-make-each-day), we cannot let data live behind the gates.\nBusiness leaders have recognized this and are investing in building out data teams whose responsibility it is to [democratize data in their organizations](/handbook/business-technology/data-team/).\nData teams are investments in your organization, but they can only provide a return if they mature; and the first step is through reporting.\n\n## Reporting\n\nReporting is the straightforward, simplistic asking and answering of questions.\nThe answers to these simple questions give an idea of what data is needed, but doesn’t allow for the standardization, collection, or tracking of data.\n\n**When you have no answers, you never get beyond looking for facts.**\nExample reporting questions are:\n* How many new users visited our e-commerce site last week?\n* How many leads did we capture this month?\n* How many MRs were merged this week?\n\nSometimes, there is no data to answer these questions.\nThis can help identify gaps and drive conversations around the data being collected.\nWhen getting data is hard, you never move past reporting.\n\nToday, getting data is easy, at least by comparison.\nWith the rise of analytical data warehouses ([at GitLab, we use Snowflake](/handbook/business-technology/data-team/platform/#our-data-stack)) optimized for columnar analyses and incredibly cheap storage, the barriers to analyses are changing, as are the kinds of questions we want to answer.\n\nMost reporting questions are possible to answer in their recording system of truth:\n* You can build a Salesforce dashboard to show you your pipeline for the next quarter.\n* You can build a Heap dashboard to show you user retention.\n* Even 
[bitmapist](https://github.com/Doist/bitmapist)— an open-source Mixpanel alternative— comes with off-the-shelf user cohorting.\n\nData analysts spending their time building analyses that are available in the system of record aren’t adding value, they’re paying tolls: they’re verifying data and getting buy-in from business stakeholders.\n\nToday, the value in data analyses lies in producing insights.\n\n## Insights\n\nWhile reporting analyses are about *gathering facts* to report on them, insights are about *understanding relationships between facts.*\nDeriving insights is a result of combining systems of records, focusing on looking for relationships in the data.\nThis is different from systems informing systems, such as piping account information from Salesforce into Zendesk to see if you’re meeting your [Support SLAs](/handbook/support/performance-indicators/#service-level-agreement-sla);\ninstead it's about producing insights that can only be gathered by combining two data sources into something new.\n\nThe GitLab Data team’s [net and gross retention analyses](/handbook/customer-success/vision/#retention-and-reasons-for-churn) are a great example of insights.\nWhile subscription information comes from Zuora, our customer accounts— and how they do or don’t roll up into parent accounts— all come from Salesforce.\nIntegrating these two data sources to build out our retention analysis helps inform our Sales and Product teams.\n\nA product manager that knows their engineering team's velocity can better estimate what features will make the next release.\nA sales team that understands what their inbound marketing pipeline is looking like for next quarter is empowered to better plan their work.\nIt's not enough to know that a particular performance indicator is up or down compared to its target;\ninsights help you understand the why behind the fact.\n\nAnswering questions such as these will show the biggest impact and value to your business:\n* Which landing pages have the lowest CAC?\n* What is the average number of site visits before a user converts?\n* What is the MoM user retention in our web application?\n\nInsights are where your data analysts need to be spending their time because insights are where data teams can start providing value.\nAnalysts can only move on to providing insights if they’re not spending all their time building reporting, but accurate reporting _is_ a prerequisite to insights.\n\nA data team that spends all their time producing numbers that already exist for the sole purpose of getting stakeholder buy-in or data tool adoption will quickly find the organization frustrated, as they will not have added new value to the business.\nBeing data-driven means you’ve crossed into a place where decisions are influenced by data, not simply finding data that matches a goal.\n\n## Predictions\n\nMature data analyses are using predictions to help drive the business forward.\n\nA product manager who can estimate the financial impact, both in cost and potential return, of developing a new feature can make a much stronger case for prioritization than a product manager who has a gut feeling and crossed fingers.\nThe same is true throughout the organization.\nIf the Financial Planning and Analysis team can predict revenue, the Support team can predict hiring requirements to support all customers, and the recruiting team can predict what hiring and onboarding timelines look like for those support engineers.\n\nAn organization that is empowered with the ability to predict performance 
through advanced analyses is a data-driven organization;\nand, because they have reporting in place to track against those predictions, they have the mechanisms to react with when reality differs from those predictions and can adjust appropriately.\n\n## How do we mature data teams?\n\nI see you nodding your head in agreement.\nHopefully, by now, you've estimated where your team is in this framework, and you're wondering how you can help them move up to the next level.\n\n### Invest in your team\n\nData teams [tend to be 2-8% of your organization](https://blog.getdbt.com/data-team-structure-examples/), and data teams do scale with organization headcount.\nYour data team will fail if you set them up for failure through understaffing.\nThe company will be frustrated with the team and default to the tools they've always known and loved (spreadsheets - and [I hate spreadsheets](https://youtu.be/PLe9sovhtGA?t=1779)).\n\nOnce you're appropriately staffed, make sure your team is using the right tools, technologies, and processes.\nAt GitLab, we firmly believe in [DataOps](https://youtu.be/PLe9sovhtGA) and that [analytics is a subfield of software engineering](https://docs.getdbt.com/docs/viewpoint).\nMany data analysts are coming from old models where version control, the command line, and checking logs are foreign ideas.\nEnsure your team is [using modern technologies](https://meltano.com) and [leveling up along the way](/handbook/business-technology/data-team/learning-library/#data-learning-and-resources).\n\n### Empower everyone in your organization with data\n\nAllow all team members to find and build the reporting they need to do their jobs.\nBy empowering them to self-serve the reporting they need, they can gather their own facts and free up the data team to move into the next tier of analysis.\nAllowing your data team to grow and mature means putting other people in positions to access and analyze the data that they need daily.\n\nAccept that the margin of error is larger on reporting when it's not produced by a member of the data team.\nIt is more important for the data to be directionally correct and accessible than perfect and bottlenecked.\n\nThis does require trusting that reporting is facts.\nData are not opinion-based.\nReporting provides you with the answers and the person or people analyzing can formulate opinions, but reporting itself is not opinionated.\n\n### Speed to Value\n\nThe sooner there is confidence in data and your data organization through reporting, the sooner your team can start providing value through insights.\nPart of how we can implement that speed is by leveraging [open source analytics](/2019/04/15/open-source-analytics/).\nMany data teams are working through the same or similar questions and [open sourcing](/blog/managing-your-snowflake-spend-with-periscope-and-dbt/) and leveraging things like [dbt packages](https://hub.getdbt.com) can help minimize the time spent reinventing the reporting wheel.\n\nThe best practices of software can help make sure a team maintains their velocity.\nThrough data quality and freshness testing, alerting, and documentation through a tool like [dbt](https://www.getdbt.com/product/), data teams can be proactive rather than reactive, setting them up for better success.\n\nData is an incredible tool, but the road to maturity can be bumpy.\nWith a strong team, you can create a data driven organization and quickly find yourself seeing the team's value.\n\n*Special thanks to [Taylor Murphy](https://gitlab.com/tayloramurphy) and [Claire 
Carroll](https://gitlab.com/clrcrl) for helping me develop my thoughts on the subject and reading early drafts of this framework.*\n",[9,869],{"slug":1440,"featured":6,"template":699},"three-levels-data-analysis","content:en-us:blog:three-levels-data-analysis.yml","Three Levels Data Analysis","en-us/blog/three-levels-data-analysis.yml","en-us/blog/three-levels-data-analysis",{"_path":1446,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1447,"content":1453,"config":1459,"_id":1461,"_type":13,"title":1462,"_source":15,"_file":1463,"_stem":1464,"_extension":18},"/en-us/blog/try-dependency-scanning",{"title":1448,"description":1449,"ogTitle":1448,"ogDescription":1449,"noIndex":6,"ogImage":1450,"ogUrl":1451,"ogSiteName":685,"ogType":686,"canonicalUrls":1451,"schema":1452},"A quick guide to GitLab Dependency Scanning","A walk through of creating a quick example project in order to see Dependency Scanning in action.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681849/Blog/Hero%20Images/iceberg_header.jpg","https://about.gitlab.com/blog/try-dependency-scanning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A quick guide to GitLab Dependency Scanning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nicole Schwartz\"}],\n        \"datePublished\": \"2021-01-14\",\n      }",{"title":1448,"description":1449,"authors":1454,"heroImage":1450,"date":1456,"body":1457,"category":693,"tags":1458},[1455],"Nicole Schwartz","2021-01-14","{::options parse_block_html=\"true\" /}\n\n\n\n\nAre you curious about our Secure offerings? They are easy, and free, to try\nout!\n\n\nI suggest you create a free demo project to check them out and see if it's\nsomething you might want. \n\n\nDid you know? If you have a public project on GitLab.com you can enable our\nSecure scanning functionality. Please note that [educational\ninstitutions](https://about.gitlab.com/solutions/education/) and\n[open-source projects](https://about.gitlab.com/solutions/open-source/join/)\ncan also request free licenses.\n\n\nIn this blog I will walk you through creating a new demo project, adding\nDependency Scanning, and reviewing the results of the scan. Following the\nsteps below should take you 15 minutes.\n\n\n### Create a test project\n\n\nLet's grab a test project and enable Dependency Scanning.\n\n\n1. [Sign in](https://gitlab.com/users/sign_in) to your GitLab account.\n\n1. Create a new project by clicking \"New project\" on your [project\nlist](https://gitlab.com/dashboard/projects).\n\n![New\nproject](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/01_new_project.png){:\n.shadow.center}\n\n1. Select the \"Create from template\" option.\n\n1. Select a project template. Be sure to choose one that is written in one\nof our [supported languages and package\nmanagers](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#supported-languages-and-package-managers).\nI have chosen a [Ruby on Rails\ntemplate](https://gitlab.com/gitlab-org/project-templates/rails).\n\n![Project from\ntemplate](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/02_from_template.png){:\n.shadow.center}\n\n1. Click the \"Use template\" button.\n\n1. You need to name your project. I named mine \"mytestrubyonrails\". 
**Be\nsure to set the Visibility level to \"Public\"**.\n\n![Template\nsettings](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/03_template_settings.png){:\n.shadow.center}\n\n1. You now have a new project.\n\n![Your new\nproject](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/04_new_project.png){:\n.shadow.center}\n\n\n### Configure Dependency Scanning to run in the pipeline\n\n\n#### Create a new file in your project\n\n\n1. Click \"New file\".\n\n![Add a new\nfile](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/05_new_file.png){:\n.shadow.center}\n\n1. You have two choices to populate the file - Template or Advanced.\n\n\n#### Use the template to fill `.gitlab-ci.yml`\n\n\n1. On the `New file` page choose \"Select a template type > .gitlab-ci.yml\".\n\n![pick yml as file\ntemplate](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/06_yml_template.png){:\n.shadow.center}\n\n1. Select \"Apply a template > Dependency-Scanning\".\n\n![dependency scanning template\nyml](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/07_yml_ds.png){:\n.shadow.center}\n\n\n#### Advanced - manually enter data into `.gitlab-ci.yml`\n\n\n1. On the `New file` page name the file `.gitlab-ci.yml`.\n\n1. Insert the necessary lines of code per our [user\ndocumentation](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#configuration).\n\n\n```\n   stages:\n   - test\n   - qa\n\n   include:\n   - template: Dependency-Scanning.gitlab-ci.yml\n\n   dependency_scanning:\n   stage: test\n   variables:\n     CI_DEBUG_TRACE: \"true\"\n```\n\n\n![advanced\nyml](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/08_yml_advanced.png){:\n.shadow.center}\n\n\n#### Commit the file\n\n\n1. Add a commit message if you want.\n\n1. Change the \"Target Branch\" from \"master\" to something else - for example\n\"add-ds\", and leave the \"Start a new merge request with these changes\" box\nchecked.\n\n![dependency scanning template rename\ntarget](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/07_yml_template_rename.png){:\n.shadow.center}\n\n1. Click \"Commit changes\".\n\n1. A \"New Merge Request\" page will load. Scroll to the bottom and click\n\"Submit merge request\".\n\n![dependency scanning template merge request part\n1](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/07_yml_template_mr_01.png){:\n.shadow.center}\n\n![dependency scanning template merge request part\n2](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/07_yml_template_mr_02.png){:\n.shadow.center}\n\n1. The pipeline will now run.\n\n\n### View pipeline results\n\n\nNow that you have your first pipeline, this and any future pipeline will run\nthe Dependency Scanning jobs. You can review the results after a pipeline\ncompletes by:\n  1. View the Merge request - look at the security MR report area.\n![merge request security\nreport](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/09_mr_report.png){:\n.shadow.center}\n  1. Click expand to see the details.\n![expanded merge request security\nreport](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/10_mr_report_expanded.png){:\n.shadow.center}\n  1. 
You can also view the Security tab in the pipeline.\n![security tab in the\npipeline](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/11_pipeline_security_tab.png){:\n.shadow.center}\n\n\nNote: For this example we are going to decide not to act on the findings as\npart of the merge request, and we have not configured [security merge\nrequest\napprovals](https://docs.gitlab.com/ee/user/application_security/index.html#security-approvals-in-merge-requests)\nso findings do not require additional approvers before you are permitted to\nmerge.\n\n\nYou can see [my example merge\nrequest](https://gitlab.com/NicoleSchwartz/mytestrubyonrails/-/merge_requests/1).\n\n\n### View results outside of the merge request\n\n\nFirst, merge this request in to master for your test project. The results\nwill not show outside of the merge request until this is done.\n\n\nNow you can see the findings by navigating to the [Security\nDashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/).\n\n![navigate to the security\ndashboard](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/navigate_dashboard.png){:\n.shadow.center}\n\n![the security\ndashboard](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/dashboard.png){:\n.shadow.center}\n\n\nYou can view just the dependencies and their found issues by viewing the\n[Dependency\nList](https://docs.gitlab.com/ee/user/application_security/dependency_list/).\n\n![navigate to the dependency\nlist](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/navigate_d_list.png){:\n.shadow.center}\n\n![the dependency\nlist](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/dependency_list.png){:\n.shadow.center}\n\n![expand a row in the dependency\nlist](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/dependency_list_expanded.png){:\n.shadow.center}\n\nYou can see [my dependency\nlist](https://gitlab.com/NicoleSchwartz/mytestrubyonrails/-/dependencies).\n\n\nYou can click on a finding in the dashboard to see more details. 
This takes\nyou to the vulnerability's page.\n\n![stand alone vulnerability's\npage](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/stand_alone_vuln.png){:\n.shadow.center}\n\n\nOn the vulnerability's page you can decide to set the status (dismiss,\nconfirm, resolve) after triaging.\n\n![stand alone vulnerabilities\nstatus](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/stand_alone_vuln_status.png){:\n.shadow.center}\n\nYou can [see my example\nfinding](https://gitlab.com/NicoleSchwartz/mytestrubyonrails/-/security/vulnerabilities/4085028).\n\n\nYou can create an issue from a vulnerability.\n\n![stand alone vulnerabilities created\nissue](https://about.gitlab.com/images/blogimages/2020-unfiltered-try-dependency-scanning/issue_created.png){:\n.shadow.center}\n\nYou can [see my example\nissue](https://gitlab.com/NicoleSchwartz/mytestrubyonrails/-/issues/1).\n\n\nNow go on and try it yourself!\n\n\nIf the above blog walkthrough of creating a demo project and running\nDependency Scanning got you curious you can [read more about Dependency\nScanning in our user\ndocumentation](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/).\n\n\nIf you would rather try a different type of Secure scanner - they are all\njust as easy to set up [read more about our Secure scanning tools in our\nuser\ndocumentation](https://docs.gitlab.com/ee/user/application_security/#security-scanning-tools).\n\n\n[Cover image](https://flic.kr/p/4SyNQi) by [Alan\nLight](https://www.flickr.com/people/alan-light/), licensed under\n[Attribution 2.0 Generic (CC BY\n2.0)](https://creativecommons.org/licenses/by/2.0/)\n\n{: .note}\n",[869,695,9],{"slug":1460,"featured":6,"template":699},"try-dependency-scanning","content:en-us:blog:try-dependency-scanning.yml","Try Dependency Scanning","en-us/blog/try-dependency-scanning.yml","en-us/blog/try-dependency-scanning",{"_path":1466,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1467,"content":1473,"config":1481,"_id":1483,"_type":13,"title":1484,"_source":15,"_file":1485,"_stem":1486,"_extension":18},"/en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"title":1468,"description":1469,"ogTitle":1468,"ogDescription":1469,"noIndex":6,"ogImage":1470,"ogUrl":1471,"ogSiteName":685,"ogType":686,"canonicalUrls":1471,"schema":1472},"Use GitLab Duo to build and deploy a simple Quarkus-native project","This tutorial shows how a Java application is compiled to machine code and deployed to a Kubernetes cluster using a CI/CD pipeline. 
See how AI makes the process faster and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666069/Blog/Hero%20Images/AdobeStock_639935439.jpg","https://about.gitlab.com/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo to build and deploy a simple Quarkus-native project\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-10-17\",\n      }",{"title":1468,"description":1469,"authors":1474,"heroImage":1470,"date":1475,"body":1476,"category":1477,"tags":1478},[738],"2024-10-17","In [“How to automate software delivery using Quarkus and\nGitLab,”](https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab/)\nyou learned how to develop and deploy a simple Quarkus-JVM application to a\nKubernetes cluster using [GitLab Auto\nDevOps](https://docs.gitlab.com/ee/topics/autodevops/). Now, you'll learn\nhow to use Quarkus-native to compile a Java application to machine code and\ndeploy it to a Kubernetes cluster using a CI/CD pipeline. Follow our journey\nfrom development to deployment leveraging [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) as our AI companion, including\nthe specific prompts we used.\n\n\n## What is Quarkus?\n\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java,\nis an open source, Kubernetes-native Java stack tailored to OpenJDK HotSpot\nand GraalVM. The Quarkus project recently moved to the [Commonhaus\nFoundation](https://www.commonhaus.org/), a nonprofit organization dedicated\nto the sustainability of open source libraries and frameworks that provides\na balanced approach to governance and support.\n\n\n## Prerequisites\n\n\nThis tutorial assumes:\n\n\n- You have a running Kubernetes cluster, e.g. GKE.\n\n- You have access to the Kubernetes cluster from your local laptop via the\n`kubectl` command.\n\n- The cluster is connected to your GitLab project.\n\n- You have [Maven (Version 3.9.6 or later)](https://maven.apache.org/)\ninstalled on your local laptop.\n\n- You have Visual Studio Code installed on your local laptop.\n\n\nIf you’d like to set up a Kubernetes cluster connected to your GitLab\nproject, you can follow the instructions in this\n[tutorial](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/),\nup to but not including the “Creating an instance of MySQL database in your\ncluster via Flux” section (you do not need a database for this tutorial).\n\n\nYou will also need to install an nginx ingress in your Kubernetes cluster.\nHere are two ways to do this:\n\n1. You can follow the instructions in [“Creating and importing\nprojects”](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/#creating-and-importing-projects),\nup to the creation of the variable `KUBE_INGRESS_BASE_DOMAIN`.\n\n2. 
Or, just create an ingress in your Kubernetes cluster by following the\ninstructions in our [Auto DevOps with GKE\ndocumentation](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html#install-ingress).\n\n\n**NOTE:** For this article, we used the first method above to install an\ningress and cert-manager in the Kubernetes cluster.\n\n\n## Creating necessary project files using GitLab Duo Chat\n\n\nWe started our endeavor from VS Code and an empty project called\n`quarkus-native`, which we had previously created in GitLab and had already\ncloned to our local laptop.\n\n\n1. We opened GitLab Duo Chat, within VS Code, and entered the following\nprompt:\n\n\n**_Create a “Hello World” Quarkus application that can be natively\ncompiled_**\n\n\n![hello-world-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/hello-world-prompt.png)\n\n\nChat replied with the prerequisites and process of what to do to create a\nsimple “Hello World” Quarkus application. At this point, we were after the\ngenerated body of the class `HelloResource.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport javax.ws.rs.GET;\n\nimport javax.ws.rs.Path;\n\nimport javax.ws.rs.Produces;\n\nimport javax.ws.rs.core.MediaType;\n\n\n@Path(\"/hello\")\n\npublic class HelloResource {\n\n    @GET\n    @Produces(MediaType.TEXT_PLAIN)\n    public String hello() {\n        return \"Hello World\";\n    }\n}\n\n```\n\n\n2. We knew that for Quarkus we needed a special Dockerfile, named\n`Dockerfile.native`, as part of the creation of the image for the\napplication executable. We then entered the following prompt in Chat:\n\n\n**_Create the Dockerfile.native file for the sample application above_**\n\n\n![create-dockerfile-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675941/Blog/Content%20Images/create-dockerfile-prompt.png)\n\n\nChat replied with a very thorough answer about this file, its contents, and\nthe containerization process. At this point, we were just after the\ngenerated body of the file `Dockerfile.native`:\n\n\n```\n\n####\n\n# This Dockerfile is used in order to build a container that runs the\nQuarkus application in native (no JVM) mode\n\n####\n\nFROM registry.access.redhat.com/ubi8/ubi-minimal:8.6\n\nWORKDIR /work/\n\nRUN chown 1001 /work \\\n    && chmod \"g+rwX\" /work \\\n    && chown 1001:root /work\nCOPY --chown=1001:root target/*-runner /work/application\n\n\nEXPOSE 8080\n\nUSER 1001\n\n\nCMD [\"./application\", \"-Dquarkus.http.host=0.0.0.0\"]\n\n```\n\n\n3. Next, we asked Chat to generate the `pom.xml` for this application by\nentering the following prompt:\n\n\n**_Create the pom.xml file for the sample application above_**\n\n\n![create-pom-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pom-prompt.png)\n\n\nChat provided the content of the `pom.xml` file, an explanation of its\ncontents, and how to use it. 
At this point, we were after the generated body\nof the file `pom.xml`:\n\n\n```\n\n\u003C?xml version=\"1.0\"?>\n\n\u003Cproject xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0\nhttps://maven.apache.org/xsd/maven-4.0.0.xsd\"\nxmlns=\"http://maven.apache.org/POM/4.0.0\"\n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  \u003CmodelVersion>4.0.0\u003C/modelVersion>\n  \u003CgroupId>org.acme\u003C/groupId>\n  \u003CartifactId>hello-world-quarkus\u003C/artifactId>\n  \u003Cversion>1.0.0-SNAPSHOT\u003C/version>\n  \u003Cproperties>\n    \u003Ccompiler-plugin.version>3.10.1\u003C/compiler-plugin.version>\n    \u003Cmaven.compiler.release>11\u003C/maven.compiler.release>\n    \u003Cproject.build.sourceEncoding>UTF-8\u003C/project.build.sourceEncoding>\n    \u003Cproject.reporting.outputEncoding>UTF-8\u003C/project.reporting.outputEncoding>\n    \u003Cquarkus.platform.artifact-id>quarkus-bom\u003C/quarkus.platform.artifact-id>\n    \u003Cquarkus.platform.group-id>io.quarkus.platform\u003C/quarkus.platform.group-id>\n    \u003Cquarkus.platform.version>2.16.5.Final\u003C/quarkus.platform.version>\n    \u003CskipITs>true\u003C/skipITs>\n    \u003Csurefire-plugin.version>3.0.0-M7\u003C/surefire-plugin.version>\n  \u003C/properties>\n  \u003CdependencyManagement>\n    \u003Cdependencies>\n      \u003Cdependency>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>${quarkus.platform.artifact-id}\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Ctype>pom\u003C/type>\n        \u003Cscope>import\u003C/scope>\n      \u003C/dependency>\n    \u003C/dependencies>\n  \u003C/dependencyManagement>\n  \u003Cdependencies>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-arc\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-resteasy\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-junit5\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.rest-assured\u003C/groupId>\n      \u003CartifactId>rest-assured\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n  \u003C/dependencies>\n  \u003Cbuild>\n    \u003Cplugins>\n      \u003Cplugin>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>quarkus-maven-plugin\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Cextensions>true\u003C/extensions>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>build\u003C/goal>\n              \u003Cgoal>generate-code\u003C/goal>\n              \u003Cgoal>generate-code-tests\u003C/goal>\n            \u003C/goals>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-compiler-plugin\u003C/artifactId>\n        \u003Cversion>${compiler-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CcompilerArgs>\n            \u003Carg>-parameters\u003C/arg>\n          \u003C/compilerArgs>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-surefire-plugin\u003C/artifactId>\n        
\u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CsystemPropertyVariables>\n            \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n            \u003Cmaven.home>${maven.home}\u003C/maven.home>\n          \u003C/systemPropertyVariables>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-failsafe-plugin\u003C/artifactId>\n        \u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>integration-test\u003C/goal>\n              \u003Cgoal>verify\u003C/goal>\n            \u003C/goals>\n            \u003Cconfiguration>\n              \u003CsystemPropertyVariables>\n                \u003Cnative.image.path>${project.build.directory}/${project.build.finalName}-runner\u003C/native.image.path>\n                \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n                \u003Cmaven.home>${maven.home}\u003C/maven.home>\n              \u003C/systemPropertyVariables>\n            \u003C/configuration>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n    \u003C/plugins>\n  \u003C/build>\n  \u003Cprofiles>\n    \u003Cprofile>\n      \u003Cid>native\u003C/id>\n      \u003Cactivation>\n        \u003Cproperty>\n          \u003Cname>native\u003C/name>\n        \u003C/property>\n      \u003C/activation>\n      \u003Cproperties>\n        \u003CskipITs>false\u003C/skipITs>\n        \u003Cquarkus.package.type>native\u003C/quarkus.package.type>\n      \u003C/properties>\n    \u003C/profile>\n  \u003C/profiles>\n\u003C/project>\n\n```\n\n\n4. We then asked Chat to generate the Quarkus test file for this application\nby entering the following prompt:\n\n\n**_Create the content for the Quarkus test for the application above_**\n\n\n![create-quarkus-test-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-quarkus-test-prompt.png)\n\n\nChat returned the body for the Quarkus test, how to use it, and the body of\nthe Java class for the test needed for its native compilation. At this\npoint, we were after the generated bodies of the file\n`HelloResourceTest.java` and `HelloResourceIT.java`.\n\n\nHere is the generated body for `HelloResourceTest.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport io.quarkus.test.junit.QuarkusTest;\n\nimport org.junit.jupiter.api.Test;\n\n\nimport static io.restassured.RestAssured.given;\n\nimport static org.hamcrest.CoreMatchers.is;\n\n\n@QuarkusTest\n\npublic class HelloResourceTest {\n\n    @Test\n    public void testHelloEndpoint() {\n        given()\n          .when().get(\"/hello\")\n          .then()\n             .statusCode(200)\n             .body(is(\"Hello World\"));\n    }\n\n}\n\n```\n\n\nHere is the generated body for `HelloResourceIT.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport io.quarkus.test.junit.QuarkusIntegrationTest;\n\n\n@QuarkusIntegrationTest\n\npublic class HelloResourceIT extends HelloResourceTest {\n    // Execute the same tests but in native mode.\n}\n\n```\n\n\n5. We needed to know how to organize these files in the GitLab project, so\nwe asked about the directory structure for all these files by entering the\nfollowing prompt in Chat:\n\n\n**_Give me the entire directory structure for this project including the\nlocation of each file, e.g. 
pom.xml, Dockerfile.native,\napplication.properties, HelloResource.java, HelloResourceTest.java, and the\nlocation of the target directory_**\n\n\n![create-dir-struct-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-dir-struct-prompt.png)\n\n\nChat replied with a detailed diagram about the entire directory structure\nfor the project and where all these files should be located as well as a\ndescription of the purpose of each of them. It even mentioned that the\ndirectory `target/` and its contents should not be version controlled since\nit was generated by the build process. Another interesting aspect of the\nreply was the existence of a file called `resources/application.properties`\nin the directory structure.\n\n\n![dir-struct-chat-response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/dir-struct-chat-response.png)\n\n\nWith all this information in our hands, we were ready to start creating\nthese files in our GitLab project.\n\n\n## Populating our project with the generated content for each file\n\n\nWe created each of the following files in their corresponding location and\ntheir generated content as provided by Chat:\n\n\n- `src/main/java/org/acme/HelloResource.java`\n\n- `resources/application.properties`\n\n- `src/test/java/org/acme/HelloResourceTest.java`\n\n- `src/test/java/org/acme/HelloResourceIT.java`\n\n- `pom.xml`\n\n- `Dockerfile.native`\n\n\n**NOTE:** We considered using GitLab Auto Deploy for this endeavor but later\nrealized that it would not be a supported option. We are mentioning this\nbecause in the video at the end of this tutorial, you will see that we asked\nChat: `How to set the service internalPort to 8080 for auto deploy`. Then we\ncreated a file named `.gitlab/auto-deploy-values.yaml` with the generated\ncontent from Chat. The creation of this file is not necessary for this\ntutorial.\n\n\nBefore we started tackling the pipeline to build, containerize, and deploy\nthe application to our Kubernetes cluster, we decided to generate the\nexecutable locally on our Mac and test the application locally.\n\n\n## Testing the application locally\n\n\nHere is the process we went through to test the application on our local\nmachine.\n\n\n1. To build the application on the local Mac laptop, from a Terminal window,\nwe entered the following command:\n\n\n```\n\nmvn clean package -Pnative\n\n```\n\n\n![first-build](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/first-build.png)\n\n\nThe native compilation failed with the error message:\n\n\n`Cannot find the ‘native-image’ in the GRAALVM_HOME, JAVA_HOME and System\nPATH. Install it using ‘gu install native-image’`\n\n\n2. So, we used our trusty GitLab Duo Chat again and asked it the following:\n\n\n**_The command “mvn clean package -Pnative” is failing with error\n“java.lang.RuntimeException: Cannot find the ‘native-image’ in the\nGRAALVM_HOME, JAVA_HOME and System PATH. Install it using gu install\nnative-image”. I’m using a MacOS Sonoma. How do I fix this error on my\nMac?_**\n\n\n![how-to-fix-build-failure-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-fix-build-failure-prompt.png)\n\n\nChat replied with a detailed set of steps on how to install the necessary\nsoftware and set the appropriate environment variables.\n\n\n3. 
We copied and pasted the following commands from the Chat window to a\nTerminal window:\n\n\n```\n\nbrew install –cask graalvm/tap/graalvm-ce-java17\n\nexport JAVA_HOME=/Library/Java/JavaVIrtualMachines/graalvm-ce-java17-22.3.1\n\nexport GRAALVM_HOME=${JAVA_HOME}\n\nexport PATH=${GRAALVM_HOME}/bin:$PATH\n\nxattr -r -d com.apple.quarantine ${GRAALVM_HOME}/../..\n\ngu install native-image\n\n```\n\n\nThe commands above installed the community edition of GraalVM Version 22.3.1\nthat supported Java 17. We noticed, during the brew install, that the\nversion of the GraalVM being installed was `java17-22.3.1`, so we had to\nupdate the pasted value for `JAVA_HOME` from `graalvm-ce-java17-22.3.0` to\n`graalvm-ce-java17-22.3.1`.\n\n\nWe also had to run the `xattr` command to get the GraalVM, which we had\ndownloaded and installed on our Mac, out of quarantine so that it could run\nlocally. Lastly, we installed the GraalVM native-image.\n\n\n4. At this point, we again, from a Terminal window, entered the following\ncommand to build the application on the local Mac laptop:\n\n\n```\n\nmvn clean package -Pnative\n\n```\n\n\nThis time the compilation was successful and an executable was generated in\nthe `target` directory.\n\n\n![successful-local-compilation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/successful-local-compilation.png)\n\n\n5. We ran the executable by entering the following commands from a Terminal\nwindow:\n\n\n```\n\ncd target\n\n./quarkus-native-1.0.0-SNAPSHOT-runner “-Dquarkus.http.host=0.0.0.0”\n\n```\n\n\n![executable-local-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/executable-local-run.png)\n\n\n6. With the application running, we opened a browser window, and in the URL\nfield, we entered:\n\n\n```\n\nhttp://localhost:8080/hello\n\n```\n\n\n![app-running-locally](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-locally.png)\n\n\nThe application returned the string `Hello World`, which was displayed in\nthe browser window.\n\n\nAt this point, we committed and pushed all the changes to our GitLab project\nand started working on creating a CI/CD pipeline that would build and deploy\nthe application to a Kubernetes cluster running on the cloud.\n\n\nBut before continuing, we remembered to add, commit, and push a `.gitignore`\nfile to our project that included the path `target/`, since this was the\ndirectory where the executable would be created and we didn’t need to keep\nit - or its contents - under version control.\n\n\n## Creating the pipeline with GitLab Duo Chat\n\n\nNow that we had already successfully tested the application locally on our\nMac, we needed to create the CI/CD pipeline that would compile the\napplication, containerize it, and deploy it to our Kubernetes cluster. We\nwanted to keep the pipeline simple, brief, and have a single environment in\nwhich to deploy it. To this end, the pipeline would not tackle multiple\nenvironments or feature branches, for example.\n\n\n1. To avoid manually creating a pipeline from scratch, we decided to once\nagain leverage Chat. We entered the following prompt\n\n\n**_Create a .gitlab-ci.yml file with 3 stages: build, containerize, and\ndeploy. 
Each of these stages should have a single job with the same name.\nThe build job should compile the application natively using the -Pnative\nmaven option and the builder image for mandrel jdk-22 for java17 and store\nthe application executable and its Dockerfile as artifacts. The containerize\njob should use docker to build and push the image to the built-in container\nregistry. The deploy job should rollout the containerized application, named\nquarkus-app, to the production environment in the Kubernetes cluster by\ncreating a deployment resource, a service resource and an ingress rule with\nhost ${KUBE_INGRESS_BASE_DOMAIN} and /hello prefix. The deploy job should\nalso include the url for the production environment. When setting the\nKubernetes context, use ${KUBE_CONTEXT}, and for application namespace, use\n${KUBE_NAMESPACE}_**\n\n\n![create-pipeline-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pipeline-prompt.png)\n\n\nThe prompt above provided some points that we thought would be useful and\nnecessary for a good outcome. The requirements listed by the prompt were:\n\n\n- It specified that the pipeline needed to have three stages: build,\ncontainerize, and deploy.\n\n- Each stage would have a single job.\n\n- The build job should natively compile (not build) the application using\nthe -Pnative option.\n\n- The build job should use the open source software `mandrel` and use the\nversion that would support jdk-22 for Java 17. Remember that these were the\nversions that we tested on our local machine.\n\n- The build job would also need to pass the resulting executable and the\nproject Dockerfile to the next job, i.e. containerize, in the pipeline.\n\n- The containerize job should build a docker image with the executable using\nthe Dockerfile, both of which were passed as artifacts.\n\n- The containerize job should push the image to the GitLab container\nregistry.\n\n- The deploy job should use the image from the GitLab container registry,\nname it `quarkus-app`, and deploy it to the Kubernetes cluster using\ndeployment, service and ingress rule resources.\n\n- The deploy job should use the environment variables\n`${KUBE_INGRESS_BASE_DOMAIN}`, `${KUBE_CONTEXT}`, and `${KUBE_NAMESPACE}`,\nwhich had been defined as group/project variables, accordingly.\n\n\nChat responded with the following `.gitlab-ci.yml` example:\n\n\n```\n\n# Define the stages for the pipeline\n\nstages:\n  - build\n  - containerize\n  - deploy\n\n# Variables section to use GitLab CI/CD variables\n\nvariables:\n  MAVEN_OPTS: \"-Dmaven.repo.local=.m2/repository\"\n  MAVEN_CLI_OPTS: \"--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true\"\n  CONTAINER_IMAGE: ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}\n\n# Cache configuration for Maven dependencies\n\ncache:\n  paths:\n    - .m2/repository\n\n# Build stage: Compile the application natively\n\nbuild:\n  stage: build\n  image:\n    name: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n      - \"\"\n  script:\n    - ./mvnw ${MAVEN_CLI_OPTS} clean package -Pnative\n  artifacts:\n    paths:\n      - target/*-runner\n      - src/main/docker/Dockerfile.native-micro\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Containerize stage: Build and push Docker image\n\ncontainerize:\n  stage: containerize\n  image: docker:20.10.16\n  services:\n    - docker:20.10.16-dind\n  variables:\n    DOCKER_TLS_CERTDIR: \"/certs\"\n  before_script:\n   
 - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -f src/main/docker/Dockerfile.native-micro -t ${CONTAINER_IMAGE} .\n    - docker push ${CONTAINER_IMAGE}\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Deploy stage: Deploy to Kubernetes\n\ndeploy:\n  stage: deploy\n  image: \n    name: bitnami/kubectl:latest\n    entrypoint: ['']\n  script:\n    - kubectl config use-context ${KUBE_CONTEXT}\n    - kubectl config set-context --current --namespace=${KUBE_NAMESPACE}\n    - |\n      cat \u003C\u003CEOF | kubectl apply -f -\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: quarkus-app\n      spec:\n        replicas: 1\n        selector:\n          matchLabels:\n            app: quarkus-app\n        template:\n          metadata:\n            labels:\n              app: quarkus-app\n          spec:\n            containers:\n            - name: quarkus-app\n              image: ${CONTAINER_IMAGE}\n              ports:\n              - containerPort: 8080\n      ---\n      apiVersion: v1\n      kind: Service\n      metadata:\n        name: quarkus-app\n      spec:\n        selector:\n          app: quarkus-app\n        ports:\n        - port: 80\n          targetPort: 8080\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: quarkus-app\n      spec:\n        rules:\n        - host: ${KUBE_INGRESS_BASE_DOMAIN}\n          http:\n            paths:\n            - path: /hello\n              pathType: Prefix\n              backend:\n                service:\n                  name: quarkus-app\n                  port: \n                    number: 80\n      EOF\n    - echo \"Application deployed to https://${KUBE_INGRESS_BASE_DOMAIN}/hello\"\n  # This job runs only on the main branch\n  only:\n      - main\n```\n\n\n2. There were some things we needed to adjust in the sample `.gitlab-ci.yml`\nfile above before we could commit it to our `main` branch. These are the\nupdates we made to the file:\n\n\n- We deleted all occurrences of `only: -main` because we wanted to keep of\npipeline definition file simple and with no branch-related rules.\n\n- We fixed the name of the file `Dockerfile.native-micro` to\n`Dockerfile.native`.\n\n\n3. At this point, we wanted to ensure that the deployment would be to the\n`production` environment so we asked Chat the following prompt:\n\n\n**_What is the syntax to specify an environment with its url in a\npipeline?_**\n\n\n![how-to-add-env-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-env-prompt.png)\n\n\nThe response from Chat included an example of how to do this so we used this\ninformation to add the following environment block to our pipeline:\n\n\n```\n  environment:\n       name: production\n       url: http://${KUBE_INGRESS_BASE_DOMAIN}/hello\n```\n\n\n4. The example provided by Chat includes a URL that started with `https` and\nwe modified that to `http` since we didn’t really need a secure connection\nfor this simple application.\n\n\n5. Lastly, we noticed that in the `build` job, there was a script `mvnw`\nthat we didn’t have in our project. 
So, we asked Chat the following:\n\n\n**_How can I get the mvnw script for Quarkus?_**\n\n\n![how-to-add-mvnw-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-mvnw-prompt.png)\n\n\nChat responded with the command to execute to bootstrap and create this\nscript. We executed this command from a Terminal window:\n\n\n```\n\nmvn wrapper:wrapper\n\n```\n\n\nWe were now ready to commit all of our changes to the `main` branch and have\nthe pipeline executed. However, on our first attempt, our first pipeline\nfailed at the build job.\n\n\n## Troubleshooting using GitLab Duo Root Cause Analysis\n\n\nOur first attempt at running our brand-new pipeline failed. So, we took\nadvantage of [GitLab Duo Root Cause\nAnalysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/),\nwhich looks at the job logs and provides a thorough natural language\nexplanation (with examples) of the root cause of the problem and, most\nimportantly, how to fix it.\n\n\n![build-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/build-job-troubleshooting.png)\n\n\nRoot Cause Analysis recommended we look at the compatibility of the command\nthat was trying to be executed with the image of mandrel used in the build\njob. We were not using any command with the image so we concluded that it\nmust have been the predefined `entrypoint` for the image itself. We needed\nto override this so we asked Chat the following:\n\n\n**_How do I override the entrypoint of an image using gitlab keywords?_**\n\n\n![how-to-override-entrypoint-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-override-entrypoint-prompt.png)\n\n\nChat replied with some use case examples of overriding an image entry point.\nWe used that information to update the build job image definition:\n\n\n```\n\nbuild:\n    stage: build\n    image: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n        - “”\n```\n\n\nWe committed our changes to the `main` branch, which launched a new instance\nof the pipeline. This time the build job executed successfully but the\npipeline failed at the `containerize` job.\n\n\n## Running a successful pipeline\n\n\nBefore drilling down into the log of the failed `containerize` job, we\ndecided to drill into the log of the successfully completed build job first.\nEverything looked good in the log of the build job with the exception of\nthis warning message at the very end of it:\n\n\n```\n\nWARNING: src/main/docker/Dockerfile.native: no matching files. Ensure that\nthe artifact path is relative to the working directory …\n\n``` \n\n\nWe took notice of this warning and then headed to the log of the failed\n`containerize` job. In it, we saw that the `docker build` command had failed\ndue to a non-existent Dockerfile. We ran Root Cause Analysis on the job and\namong its suggested fixes was for us to verify that the project structure\nmatched the path of the specified `Dockerfile.native` file.\n\n\n![containerize-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/containerize-job-troubleshooting.png)\n\n\nThis information confirmed our suspicion of the misplaced\n`Dockerfile.native` file. 
Instead of being at the directory\n`src/main/docker` as specified in the pipeline, it was located at the root\ndirectory of the project.\n\n\nSo, we went back to our project and updated every occurrence of the location\nof this file in our `.gitlab-ci.yml` file. We modified the two locations\nwhere this happened, one in the `build` job and one in the `containerize`\njob, as follows:\n\n\n```\n\nsrc/main/docker/Dockerfile.native\n\n```\n\n\nto\n\n\n```\n\nDockerfile.native\n\n```\n\n\nWe committed our updates to the `main` branch and this time our entire\npipeline executed successfully!\n\n\n![pipeline-successful-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/pipeline-successful-run.png)\n\n\nOur last step was to check the running application in the `production`\nenvironment in our Kubernetes cluster.\n\n\n## Accessing the deployed application running in cluster\n\n\nOnce the pipeline ran successfully to completion, we drilled in the log file\nfor the `deploy` job. Remember, this job printed the URL of the application\nat the end of its execution. We scrolled down to the bottom of the log and\nclicked on the `https` application link, which opened a browser window\nwarning us that the connection was not private (we disabled `https` for the\nenvironment URL but forgot it for this string). We proceeded past the\nbrowser warning and then the string \"Hello World\" was displaced in the\nbrowser window indicating that the application was up and running in the\nKubernetes cluster.\n\n\nFinally, to double-check our production deployment URL, we headed to the\nproject **Operate > Environments** window, and clicked on the \"Open\" button\nfor it, which immediately opened a browser window with the \"Hello World\"\nmessage.\n\n\n![app-running-on-k8s](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-on-k8s.png)\n\n\n## Try it \n\n\nWe created, compiled, built, and deployed a simple Quarkus application to a\nKubernetes cluster using [GitLab Duo](https://about.gitlab.com/gitlab-duo/).\nThis approach allowed us to be more efficient and productive in all the\ntasks that we performed and it helped us streamline our DevSecOps processes.\nWe have shown only a small portion of how GitLab Duo's AI-powered\ncapabilities can help you, namely Chat and Root Cause Analysis. 
There’s so\nmuch more you can leverage in GitLab Duo to help you create better software\nfaster and more securely.\n\n\nWatch this whole use case in action:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDpycxz3RPY?si=HHZrFt1O_8XoLATf\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nAll the project assets we used are available\n[here](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/quarkusn/quarkus-native).\n\n\n> [Try GitLab Duo for free](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\nand get started on exciting projects like this.\n","ai-ml",[1335,870,1479,9,869,1480,108],"DevSecOps","product",{"slug":1482,"featured":90,"template":699},"use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","content:en-us:blog:use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","Use Gitlab Duo To Build And Deploy A Simple Quarkus Native Project","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"_path":1488,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1489,"content":1495,"config":1503,"_id":1505,"_type":13,"title":1506,"_source":15,"_file":1507,"_stem":1508,"_extension":18},"/en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"ogTitle":1490,"schema":1491,"ogImage":1492,"ogDescription":1493,"ogSiteName":685,"noIndex":6,"ogType":686,"ogUrl":1494,"title":1490,"canonicalUrls":1494,"description":1493},"Build enterprise-grade IaC pipelines with GitLab DevSecOps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab and Ansible to create infrastructure as code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brad Downey\"},{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-01\",\n      }","https://res.cloudinary.com/about-gitlab-com/image/upload/v1746211002/zlet4rmfg2z0j6lg16mc.png","Learn how to transform infrastructure automation into scalable, secure pipelines using GitLab, Terraform/OpenTofu, and Ansible with integrated security scanning and CI/CD.","https://about.gitlab.com/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"heroImage":1492,"body":1496,"authors":1497,"updatedDate":1500,"date":1501,"title":1490,"tags":1502,"description":1493,"category":762},"Infrastructure-as-code tools like TerraForm/OpenTofu and configuration\nmanagement tools like Ansible are often part of mission-critical workflows.\nSuch projects sometimes start as simple automations and are not necessarily\nsubject to the same software development best practices and regulatory\ncontrols as business software applications.\n\n\nAt the same time many of these automations are developed by system engineers or infrastructure engineers who may not have as much experience with DevOps, DevSecOps, CI/CD, and test automation practices. This becomes even more complicated when you work in a large enterprise organization with multiple engineers and siloed teams.\n\n\nAt GitLab we know DevSecOps and we have been using our unified DevSecOps platform for enterprise-scale, mission-critical automation workloads for more than 10 years. 
We have thousands of customers who use GitLab as a foundation for infrastructure as code (IaC), automation, cloud, and platform engineering practices.\n\n\nIn this article, we showcase some of the key features teams can leverage to turn their powerful automations into scalable and auditable software delivery pipelines.\n\n\n![Automation listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/oipm6tq8qutoh1ctredd.png)\n\n\n## Implementation\n\n\n[This project](https://gitlab.com/gl-demo-ultimate-saberkan/public/ansible-demo) demonstrates a comprehensive DevOps workflow that combines the power of OpenTofu with modern Ansible practices, all orchestrated through GitLab CI/CD pipelines. The solution showcases how to provision an AWS lab environment using OpenTofu components integrated with GitLab, and then deploy a Tomcat web server using modern Ansible, including custom execution environments and collections.\n\n\nThe project leverages numerous GitLab features:\n\n\n* Building and storing custom Ansible execution environments in the [GitLab Container Registry](https://docs.gitlab.com/user/packages/container_registry/)\n\n* [Security scanning for infrastructure as code and container vulnerabilities](https://docs.gitlab.com/user/application_security/iac_scanning/)\n\n* Integrating [Ansible linting with GitLab's Code Quality](https://docs.gitlab.com/user/application_security/iac_scanning/)\n\n* Storing Tomcat binaries in the [Generic Package Repository](https://docs.gitlab.com/user/packages/generic_packages/)\n\n* Utilizing [CI/CD environment variables for configuration](https://docs.gitlab.com/ci/variables/)\n\n\nThe entire workflow is automated through a [GitLab pipeline](https://docs.gitlab.com/ci/pipelines/) that handles everything from infrastructure provisioning to application deployment and security testing.\n\n\n![ Workflow automated through a GitLab pipeline ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/giatmolwn9inusi4cev2.png)\n\n\n### Provisioning the environment with OpenTofu\n\n\nThe project begins with provisioning an AWS lab environment using OpenTofu. This is achieved through native integration with [GitLab's OpenTofu components](https://docs.gitlab.com/user/infrastructure/iac/), which streamline the infrastructure provisioning process. The pipeline includes validate, plan, and apply stages that ensure proper infrastructure deployment while maintaining GitLab's IaC best practices.\n\n\nThis project is leveraging [GitLab's Terraform State management](https://docs.gitlab.com/user/infrastructure/iac/terraform_state/) and [Terraform Module Registry](https://docs.gitlab.com/user/packages/terraform_module_registry/) capabilities. Both of these features are compatible with OpenTofu and HashiCorp Terraform. GitLab OpenTofu components can also be used with HashiCorp Terraform with [slight customization](https://gitlab.com/components/opentofu#can-i-use-this-component-with-terraform). You'll need to build your own image that includes a script named `gitlab-tofu` to keep it compatible with the component jobs then you can then modify `tofu` commands with `terraform` commands.\n\n\nThe OpenTofu module release component is a sample demonstrating how to build a Terraform module and store it in GitLab's Terraform module registry. The `provision_lab.tf` file imports this module directly from GitLab to deploy the lab environment in AWS. 
Upon completion, it outputs an inventory file containing the public IP address of the provisioned instance, which can be used in configuration management stages with Ansible.\n\n\n```\n\n# From .gitlab-ci.yml\n - component: gitlab.com/components/opentofu/module-release@1.1.0\n   inputs:\n     root_dir: tofu\n     as: 🔍 tofu-module-release\n     stage: 🏗️ build-tofu-module\n     module_version: 0.0.1\n     module_system: aws\n     module_name: aws-lab\n     root_dir: tofu/modules/ansible-demo/aws-lab\n     rules:\n       - if: \"$CI_COMMIT_BRANCH\"\n         when: manual\n```\n\n\n```\n\n# From provision_lab.tf\n\nmodule \"aws-lab\" {\n  source = \"https://gitlab.com/api/v4/projects/67604719/packages/terraform/modules/aws-lab/aws/0.0.1\"\n}\n\n```\n\n\nThe validate, plan, and deploy components are configured with `**auto_define_backend: true**`, which automatically integrates with GitLab's built-in Terraform state backend. This approach eliminates the need for manual backend configuration or external state storage solutions like S3 buckets.\n\n\n```\n\n# From gitlab-ci.yml\n\n- component: gitlab.com/components/opentofu/apply@0.55.0\n  inputs:\n    version: 0.55.0\n    opentofu_version: 1.8.8\n    root_dir: tofu\n    state_name: demo\n    as: ✅ tofu-apply\n    stage: 🏗️ provision-lab\n    auto_define_backend: true\n    rules:\n      - if: \"$CI_COMMIT_BRANCH\"\n        when: manual\n```\n\n\n![Validate, plan, and deploy components are configured with `auto_define_backend: true`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/giatmolwn9inusi4cev2.png)\n\n\nThe infrastructure configuration creates a CentOS Stream 9 EC2 instance with appropriate security groups for SSH access from GitLab runners and HTTP access to the Tomcat server.\n\n\nSSH access and HTTP configuration are configuration thought [GitLab CI/CD environment variables](https://docs.gitlab.com/ci/variables/#define-a-cicd-variable-in-the-ui).\n\n\n![SSH access and HTTP configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433381/cmqtzg6ahz8ua5w8ybgs.png)\n\n\nFor secure cloud access, the project implements [GitLab's OpenID Connect integration](https://docs.gitlab.com/ci/cloud_services/aws/) with AWS, using temporary credentials through AWS Security Token Service (STS):\n\n\n```\n\n# From .gitlab-ci.yml\n\n.tofu_aws_setup:\n id_tokens:\n   OIDC_TOKEN:\n     aud: https://gitlab.com\n before_script:\n   - echo \"${OIDC_TOKEN}\" > /tmp/web_identity_token\n   - export AWS_PROFILE=\"\"\n   - export AWS_ROLE_ARN=\"${AWS_ROLE_ARN}\"\n   - export AWS_WEB_IDENTITY_TOKEN_FILE=\"/tmp/web_identity_token\"\n```\n\n\n### Building the Ansible execution environment\n\n\nA key aspect of modern Ansible deployments is the use of [execution environments](https://docs.ansible.com/ansible/latest/getting_started_ee/index.html), containerized versions of Ansible with all necessary dependencies including roles and collections pre-installed. This project creates a custom execution environment based on Fedora 39, which includes ansible-core, ansible-runner, and additional collection such as ansible.posix required in this example for firewall and selinux configuration.\n\n\nThe third-party roles and collections in this project are natively downloaded from the community Ansible Galaxy repository. This approach leverages the community ecosystem of reusable Ansible content, as shown in the execution environment configuration. 
While this demo utilizes community Ansible resources, the exact same pipeline implementation is fully compatible with Red Hat Ansible Automation Platform. The pipeline structure remains identical, with only the content sources changing. Organizations using the enterprise version can simply redirect their automation content sources to their private Automation Hub instead of the default community Ansible Galaxy. According to the official enterprise documentation, this can be achieved by [configuring your private Automation Hub server and access token in the ansible.cfg](https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/1.2/html/getting_started_with_red_hat_ansible_automation_hub/proc-configure-automation-hub-server#proc-configure-automation-hub-server).\n\n\n```\n\n# From execution-environment.yml\n\n---\n\nversion: 3\n\n\nimages:\n  base_image:\n    name: quay.io/fedora/fedora:39\n\ndependencies:\n  ansible_core:\n    package_pip: ansible-core\n  ansible_runner:\n    package_pip: ansible-runner\n  system:\n    - openssh-clients\n    - sshpass\n  galaxy:\n    collections:\n    - name: ansible.posix\n      version: \">=2.0.0\"\n```\n\n\n![Execution environment pushed to GitLab's Container Registry ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433384/dh1o2ojjmb04ru4tfr9k.png)\n\n\nThe execution environment is defined in a YAML file and built using ansible-builder, then pushed to [GitLab's Container Registry](https://docs.gitlab.com/user/packages/container_registry/). This approach ensures consistent execution environments across different systems and simplifies dependency management.\n\n\n```\n\n# From gitlab-ci.yml\n\n🔨 ansible-build-ee:\n  stage: 📦 ansible-build-ee\n  image: docker:24.0.5\n  needs: []\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - apk add --no-cache python3 py3-pip\n    - pip install ansible-builder\n    - cd ansible/execution-environment\n  script:\n    - ansible-builder build -t ${EE_IMAGE_NAME}:${EE_IMAGE_TAG} --container-runtime docker\n    - docker tag ${EE_IMAGE_NAME}:${EE_IMAGE_TAG} ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n```\n\n\n### Deploying Tomcat with Ansible\n\n\nOnce the infrastructure is provisioned and the execution environment is built, the pipeline deploys Tomcat using [Ansible Navigator](https://ansible.readthedocs.io/projects/navigator/). 
The execution environment built in previous stage is used as image for deployment job in GitLab pipeline.\n\n\n```\n\n# From gitlab-ci.yml\n\n🚀 ansible-deploy:\n  stage: 🚀 ansible-deploy\n  image: ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n  needs:\n    - ✅ tofu-apply\n  extends: [.ssh_private_key_setup, .default_rules]\n  script:\n    - ansible-navigator run ansible/playbook.yml\n      -i ansible/inventory/hosts.ini\n      --execution-environment false\n      --mode stdout\n      --log-level debug\n```\n\n\nThe Tomcat deployment fetches the application package from [GitLab's Generic Package Repository](https://docs.gitlab.com/user/packages/generic_packages/), configures system users and permissions, and sets up Tomcat as a systemd service.\n\n\n```\n\n# From playbook.yml\n\n---\n\n- name: Deploy Tomcat Server\n  hosts: all\n  become: true\n  roles:\n      - role: tomcat\n\n  vars:\n    # Tomcat package and installation\n    tomcat_package: \"https://gitlab.com/api/v4/projects/67604719/packages/generic/apache-tomcat/10.1.39/apache-tomcat-10.1.39.tar.gz\"\n    tomcat_install_dir: \"/opt/tomcat\"\n    java_package: \"java-17-openjdk-devel\"\n```\n\n\n![GitLab Package Registry](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433381/mynak8i2k7ms9vhdijqg.png)\n\n\n### Security scanning and code quality\n\n\nSecurity is integrated throughout the pipeline with multiple scanning tools. The project uses [GitLab's built-in SAST IaC scanner](https://docs.gitlab.com/user/application_security/iac_scanning/) to detect vulnerabilities in both Terraform and Ansible code. [Container scanning](https://docs.gitlab.com/user/application_security/container_scanning/) is applied to the execution environment image to identify any security issues and generate a [software bill of materials (SBOM)](https://docs.gitlab.com/user/application_security/container_scanning/#cyclonedx-software-bill-of-materials).\n\n\n```\n\n# From gitlab-ci.yml\n\ninclude:\n\n- template: Jobs/SAST-IaC.gitlab-ci.yml\n\n- template: Jobs/Container-Scanning.gitlab-ci.yml\n\n```\n\n\n![Security is integrated throughout the pipeline with multiple scanning tools](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433386/e6ejckcv5kdyhhosej2f.png)\n\n\n\n\n![Dependency listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/gsfpaldra4rmtkseaudo.png)\n\n\nAdditionally, the project integrates Ansible Linter with [GitLab's Code Quality](https://docs.gitlab.com/ci/testing/code_quality/#import-code-quality-results-from-a-cicd-job). This integration produces reports that are displayed directly in the GitLab interface, making it easy to identify and address issues.\n\n\n```\n\n# From gitlab-ci.yml\n\n🔍 ansible-lint:\n  stage: 🚀 ansible-deploy\n  image: ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n  needs: []\n  script:\n    - ansible-lint ansible/playbook.yml -f codeclimate | python3 -m json.tool | tee gl-code-quality-report.json || true\n  artifacts:\n    reports:\n      codequality:\n        - gl-code-quality-report.json\n```\n\n\n![The project integrates Ansible Linter with GitLab code quality](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/gsfpaldra4rmtkseaudo.png)\n\n\n### Health-checking the deployment\n\n\nAfter deployment, the pipeline performs health checks to ensure that the Tomcat server is running correctly. The health-check job attempts to connect to the server's HTTP port and verifies that it returns a successful response. 
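A minimal sketch of what such a health-check job could look like (the job name, its placement in the 🚀 ansible-deploy stage, and the `TOMCAT_HOST` variable are assumptions for illustration, not taken from the demo project; it also assumes Tomcat listens on its default port 8080):\n\n\n```\n\n# Hypothetical health-check job sketch - not copied from the demo repository\n\nhealth-check:\n  stage: 🚀 ansible-deploy\n  image: curlimages/curl:latest\n  needs:\n    - 🚀 ansible-deploy\n  script:\n    # TOMCAT_HOST is assumed to be supplied, e.g. as a CI/CD variable or parsed\n    # from the inventory file produced by the tofu-apply job\n    - curl --fail --retry 5 --retry-delay 10 --retry-connrefused \"http://${TOMCAT_HOST}:8080/\"\n```\n\n\n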
A passing check confirms that the deployment completed successfully and that the application is accessible.\n\n\nYou can also test access to Tomcat from your browser by opening the public IP address of the provisioned EC2 instance.\n\n\n![Checking the health of a job](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433385/uksdkjryydxhu94v1naj.png)\n\n\n## Destroying the lab environment\n\n\nThe final stage of the pipeline is the cleanup process, which destroys the lab environment. This is implemented using the OpenTofu destroy component, which ensures that all resources created during the provisioning stage are properly removed.\n\n\n## Summary\n\n\nGitLab provides a unified DevSecOps platform and a framework to manage enterprise-scale, mission-critical infrastructure as code and configuration management automation practices. The framework includes version control, project planning and issue management, team collaboration, CI/CD pipelines, binary package and container registry, security scanning, and many other helpful features, along with the ability to embed governance and controls in the processes. If you are looking to expand your private or public cloud practices or, more generally, any governed, self-service automation workflow, consider GitLab, Terraform, and Ansible as the three-legged stool and the foundation for a scalable and governed automation platform.\n\n\n> Get started with a [free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/). Sign up today!\n",[1498,1499],"George Kichukov","Salahddine Aberkan","2025-04-24","2019-07-01",[9,108],{"slug":1504,"featured":6,"template":699},"using-ansible-and-gitlab-as-infrastructure-for-code","content:en-us:blog:using-ansible-and-gitlab-as-infrastructure-for-code.yml","Using Ansible And Gitlab As Infrastructure For Code","en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code.yml","en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"_path":1510,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1511,"content":1517,"config":1523,"_id":1525,"_type":13,"title":1526,"_source":15,"_file":1527,"_stem":1528,"_extension":18},"/en-us/blog/working-on-two-git-branches-at-the-same-time",{"title":1512,"description":1513,"ogTitle":1512,"ogDescription":1513,"noIndex":6,"ogImage":1514,"ogUrl":1515,"ogSiteName":685,"ogType":686,"canonicalUrls":1515,"schema":1516},"How to work on two Git branches at the same time","Watch the demo on how using the GitLab Web IDE and your local dev environment to work on two branches at once can help save time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678782/Blog/Hero%20Images/working-on-two-git-branches-at-the-same-time.jpg","https://about.gitlab.com/blog/working-on-two-git-branches-at-the-same-time","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to work on two Git branches at the same time\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-10-03\",\n      }",{"title":1512,"description":1513,"authors":1518,"heroImage":1514,"date":1520,"body":1521,"category":762,"tags":1522},[1519],"William Chia","2018-10-03","\nI was recently using both my local development environment and the GitLab [Web IDE](/blog/introducing-gitlab-s-integrated-development-environment/), and found a really nice workflow for working with two Git branches simultaneously.\n\n### The problem\n\nIn this scenario, you’re doing development work 
on one branch, in one part of your codebase, and then likely documenting your process in another place. I really don’t want all of this in one merge request, because I don’t want to delay shipping the development work if [the docs](https://docs.gitlab.com) aren’t done. I want to be able to get it live so that others can see it, give feedback on each individual component, and iterate on it. At the same time, I don’t want to delay too long on documenting the process, because I want the docs to be as accurate and reproducible as possible.\n\n### The fix\n\nWhile doing my development work in my local development environment, I created another merge request for the documentation using the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/), essentially working on two different Git branches at the same time, using two different editors.\n\nIn my quick example below, you can see a merge request to add Jenkins content to our [DevOps tools](/competition/) page. I’ve checked out this branch locally, and I have it open in my Atom editor. I’ve been doing some work by updating `features.yml`, as well as a Markdown file and a Haml file. All of these changes are related to one merge request. While I’m committing changes locally to the comparison page, I’m documenting each step in my Web IDE in a separate tab, to make sure my instructions are precise, helpful, and completed in real time.\n\n### Watch the demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uV3ycYnwhBc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can see what we've got planned for the Web IDE in 2019 in our post about [our product vision for DevOps Create](/blog/create-vision/).\n\nWhat are other ways the Web IDE has come in handy for you? 
Let us know by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\nCover [photo](https://unsplash.com/photos/3y1zF4hIPCg) by [Hans-Peter Gauster](https://unsplash.com/photos/3y1zF4hIPCg) on Unsplash\n{: .note}\n",[9,869,1356,696,1132],{"slug":1524,"featured":6,"template":699},"working-on-two-git-branches-at-the-same-time","content:en-us:blog:working-on-two-git-branches-at-the-same-time.yml","Working On Two Git Branches At The Same Time","en-us/blog/working-on-two-git-branches-at-the-same-time.yml","en-us/blog/working-on-two-git-branches-at-the-same-time",{"_path":1530,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1531,"content":1537,"config":1545,"_id":1547,"_type":13,"title":1548,"_source":15,"_file":1549,"_stem":1550,"_extension":18},"/en-us/blog/write-vulnerability-detection-rules",{"title":1532,"description":1533,"ogTitle":1532,"ogDescription":1533,"noIndex":6,"ogImage":1534,"ogUrl":1535,"ogSiteName":685,"ogType":686,"canonicalUrls":1535,"schema":1536},"How to write and continuously test vulnerability detection rules for SAST","Interns with the Google Summer of Code helped GitLab transition from our old SAST tools to Semgrep.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667819/Blog/Hero%20Images/anomaly-detection-cover.png","https://about.gitlab.com/blog/write-vulnerability-detection-rules","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to write and continuously test vulnerability detection rules for SAST\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ross Fuhrman\"},{\"@type\":\"Person\",\"name\":\"Anshuman Singh\"},{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        \"datePublished\": \"2021-09-08\",\n      }",{"title":1532,"description":1533,"authors":1538,"heroImage":1534,"date":1542,"body":1543,"category":695,"tags":1544},[1539,1540,1541],"Ross Fuhrman","Anshuman Singh","Julian Thome","2021-09-08","In summer 2021, the [Vulnerability\nResearch](/handbook/engineering/development/sec/secure/vulnerability-research/)\nand [Static\nAnalysis](/handbook/engineering/development/sec/secure/static-analysis/)\n\nteams launched the [Google Summer of Code\n(GSoC)](https://summerofcode.withgoogle.com/) project: [Write vulnerability\ndetection rules for\nSAST](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues/3).\n\n\nFor this project, we built and implemented a framework that helps transition\nGitLab away from our current SAST tools over to Semgrep. Semgrep is a\nlanguage-agnostic SAST tool that is gaining popularity in CI/CD\nenvironments.\n\nBefore replacing an analyzer with the corresponding Semgrep configuration\n(called a rule-set), we need to ensure that they are equivalent – in that\nthey yield the same set of findings.\n\n\nFor this purpose, we built a testing framework that helps us assess the\nquality of a Semgrep rule-set. This framework has been used to guide the\nreplacement of\n[flawfinder](https://gitlab.com/gitlab-org/security-products/analyzers/flawfinder),\na C/C++ analyzer, with a corresponding Semgrep rule-set. This new testing\nframework leverages the power of GitLab CI/CD.\n\n\n## Preliminaries\n\n\n### GitLab and the Google Summer Of Code (GSoC)\n\n\nThe Google Summer of Code (GSoC) is a 10-week program that enlists student\ninterns to work on an open source project in collaboration with open source\norganizations. 
For GSoC 2021, GitLab offered [4 GSoC projects to the GSoC\ninterns](/blog/gsoc-at-gitlab/). The [interns completed each\nproject](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues)\nunder the guidance of a GitLab team member who serves as their mentor and\nprovides regular feedback and assistance when needed.\n\n\n**[Read reflections from the Google Summer of Code interns about [what it\nwas like working with GitLab](/blog/gsoc-at-gitlab/)]**\n\n\n### About Semgrep\n\n\n[Semgrep](https://semgrep.dev/) is a language-agnostic static-analysis\n(SAST) tool that is powered by\n[tree-sitter](https://tree-sitter.github.io/tree-sitter/). Tree-sitter is a\nrobust parser-generator tool that supports parsing a variety of languages.\n\n\nSemgrep supports a\n[rule-syntax](https://semgrep.dev/docs/writing-rules/rule-syntax/) which can\nbe used to formulate detection rules in a configuration-as-code YAML format.\nA Semgrep rule determines the findings that Semgrep is supposed to detect.\nThese rules are combined together to create a rule-set.\n\n\n### About GitLab SAST\n\n\nGitLab is a complete DevSecOps platform and integrates a [variety of static\nanalysis\ntools](https://docs.gitlab.com/ee/user/application_security/sast/analyzers.html)\nthat help developers find vulnerabilities as early as possible in the\nsoftware development lifecycle (SDLC).\n\n\nSince all the integrated SAST tools are very different in terms of\nimplementation as well as the tech stack they depend on, the SAST tools are all\nwrapped in Docker images. The wrappers translate the native vulnerability\nreports to a [generic, common report\nformat](https://docs.gitlab.com/ee/user/application_security/sast/) which is\nmade available by means of the `gl-sast-report.json` artifact. This generic\nreport is GitLab's common interface between analyzers and the GitLab Rails\nbackend.\n\n\n## Write vulnerability detection rules\n\n\n### Some background on our SAST tools\n\n\nOver time, the growing number of integrated SAST tools has become a\nmaintenance burden for GitLab due to two major contributing factors.\n\n\n1. **Integration cost**: All SAST tools have different release cycles – new\nreleases have to be pulled in immediately so that our users can benefit from\nthem. Given the large number of integrated SAST tools, the time spent\nmonitoring them for new releases, integrating them, and testing them is\nexpensive in terms of engineering effort/time.\n\n\n1. **Inflexibility**: Adapting or modifying a SAST tool's behavior is\nnon-trivial because each tool is based on different technologies. Also,\nupstream contributions to the original analyzer repositories are not\nguaranteed to be included by the maintainers. In these cases, we are required\nto fork the project, which is not a scalable solution with regards to\nmaintenance effort.\n\n\nGitLab is in the process of replacing various SAST tools with a single,\nlanguage-agnostic SAST tool, called Semgrep, to fix these problems. Semgrep\ncan be configured by means of rules that are used to define what Semgrep is\nsupposed to find. These rules are provided as YAML configuration files so it\nis fairly easy to adapt the behavior of Semgrep to different use cases.\n\nSemgrep's configuration-as-code approach paired with its language support\nenables us to replace multiple analyzers, which effectively reduces the\nmaintenance burden.\n
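\n\nAs a rough illustration of this rule syntax (the example below is hand-written for this article and is not one of the rules GitLab ships), a minimal Semgrep rule that flags calls to Python's `exec` could look like this:\n\n\n``` yaml\n\nrules:\n  - id: python-exec-used\n    # Match any call to exec(), regardless of its arguments\n    pattern: exec(...)\n    message: Avoid exec() because it can execute arbitrary code.\n    languages:\n      - python\n    severity: WARNING\n```\n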
\n\nHowever, the SAST tool replacement itself is a challenging process. For the\nmajority of use cases we have to assume that there is already a large amount\nof historic vulnerability data recorded and acted upon using [GitLab's\nvulnerability management\nfeatures](https://handbook.gitlab.com/handbook/security/threat-management/vulnerability-management/).\nUsers may also have grown accustomed to working with certain analyzers and\nmay even have a certain level of expectation with regards to the findings\nproduced by the analyzer.\n\n\nA smooth transition from a language-specific analyzer to a corresponding\nSemgrep rule-set requires a certain level of quality assurance. A rule-set\nshould produce results that are at least as good as those of the original\nanalyzer, a property also known as parity. To assess parity, we built\ntest-suites that measure the gap (in terms of rule coverage)\nbetween the original analyzer and the rule-set that is to replace it. A good\nquality rule-set is expected to perform at least as well as the SAST tool it\naims to replace (zero gap, full parity).\n\n\nThere are cases where the original SAST tool may falsely report\nvulnerabilities. In these situations, we aim to improve our rule-set in a\ncontrolled manner by explicitly documenting our improvements. However,\nbefore improving a rule-set, we want to start from a position of complete\nparity so that we have a holistic view of the impact incurred by single rule\nimprovements. This documentation of applied improvements is important so that\nwe can justify changes in reported findings to customers.\n\n\nThere are three challenges we tried to address with this project:\n\n\n1. **Rule management**: Provide a central rule repository to store,\ndistribute and track changes applied to rules as well as test-cases.\n\n1. **Rule testing**: Every change applied to a rule in the rule repository\ntriggers an automated gap-analysis that measures the quality of the rules in\ncomparison to the original analyzers.\n\n1. **Analyzer replacement**: Replace at least one SAST tool (in our case\nflawfinder) with a corresponding rule-set – use the testing framework to\nensure that the rule-set is on par with the original SAST tool.\n\n\nWe unpack each of these challenges in the next section.\n\n\n### How we approached these challenges\n\n\nThe architecture of the rule-testing framework is depicted in the code\nsnippets below. All the Semgrep rules and the corresponding test-cases are\nstored in a central rule repository. 
Changes that are applied to the rules\ntrigger the execution of our rule testing framework that uses the rules and\ntest-cases to perform an automated gap analysis.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n  crr[GitLab Rule Repository]\n\n  bandit[\"GitLab bandit\"]\n  bx[\"gl-sast-report.json\"]\n  sbx[\"gl-sast-report.json\"]\n  breport[\"bandit gap analysis report\"]\n\n  subgraph bandit_comparison[\"bandit comparison\"]\n    banditsemgrep[\"GitLab Semgrep\"]\n    banditcompare[\"compare\"]\n    bandit --> |run analyzer on test-cases| bx\n    banditsemgrep --> |run analyzer on test-cases| sbx\n    bx --> banditcompare\n    sbx --> banditcompare\n  end\n  crr -->|bandit rules + rule id mappings| banditsemgrep\n  banditcompare --> breport\n\n  fx[\"gl-sast-report.json\"]\n  fbx[\"gl-sast-report.json\"]\n  freport[\"flawfinder gap analysis report\"]\n  flawfinder[\"GitLab flawfinder\"]\n\n  subgraph flawfinder_comparison[\"flawfinder comparison\"]\n    flawfindersemgrep[\"GitLab Semgrep\"]\n    flawfindercompare[\"compare\"]\n    flawfinder --> |run analyzer on test-cases| fx\n    flawfindersemgrep --> |run analyzer on test-cases| fbx\n    fx --> flawfindercompare\n    fbx --> flawfindercompare\n  end\n  crr -->|flawfinder rules + rule id mappings| flawfindersemgrep\n  flawfindercompare --> freport\n\n\u003C/pre>\n\n\nThe rule testing framework is a compass that guides us through the rule\ndevelopment process by automatically measuring the efficacy of rules that\nare stored in the central rule (git) repository. This measurement happens\nduring a comparison step that validates the findings reported by the\noriginal analyzer against the corresponding Semgrep rule-set. For the\ncomparisons we cross-validate the SAST\n\nreports\n([`gl-sast-report.json`](https://docs.gitlab.com/ee/user/application_security/sast/))\nthat adhere to the GitLab security report format. Since the main goal is to\nachieve parity between the original analyzer and our corresponding Semgrep\nrules, we treat the original analyzer as the baseline. The code snippet\nabove depicts two example comparison steps for bandit and flawfinder.  The\ngap analysis is explained in more detail in the \"rule testing\" section\nbelow.\n\n\nUsing a central rule git repository allows us to manage and easily track\nchanges that are applied to rules and their corresponding test-cases in a\ncentral location. By means of GitLab CI/CD, we have a mechanism to\nautomatically run tests that enforce constraints and guidelines on the rules\nand test-cases. Upon rule changes, we automatically trigger the rule-testing\nframework which enables us to spot gaps in our rules instantly. The\nstructure of the central rule repository is detailed in the \"rule\nmanagement\" section below.\n\n\n#### How we addressed rule management challenges\n\n\nThe central rule repository is used to store, keep track of changes applied\nto `rules/test-cases` for a variety of different languages. By having a\nseparate rule repository we can add CI jobs to test, verify, and enforce\nsyntax guidelines.\n\n\nThe structure we use for the central rule repository is depicted below and\nfollows the structure: `\u003Clanguage>/\u003Cruleclass>/{rule-\u003Crulename>.yml,\ntest-\u003Crulename>.*}` where language denotes the target programming language,\n`\u003Cruleclass>` is a descriptive name for the class of issues the rule aims to\ndetect and `\u003Crulename>` is a descriptive name for the actual rule.  
We can\nhave multiple test cases per rule (all prefixed with `test-`) and rule files\n`rule-\u003Crulename>.yml` that are prefixed with `rule-` – a rule file contains\na single Semgrep rule.\n\n\n``` bash\n\n.\n\n├── mappings\n\n│   └── analyzer.yml\n\n├── c\n\n│   ├── buffer\n\n│   │   ├── rule-strcpy.yml\n\n│   │   ├── test-strcpy.c\n\n│   │   ├── rule-memcpy.yml\n\n│   │   └── test-memcpy.c\n\n│   └── ...\n\n└── javascript\n\n│   └── ...\n\n└── python\n\n│    ├── assert\n\n│    │   ├── rule-assert.yml\n\n│    │   └── test-assert.py\n\n│    └── exec\n\n│    │   ├── rule-exec.yml\n\n│    │   ├── test-exec.yml\n\n│    │   ├── rule-something.yml\n\n│    │   └── test-something.yml\n\n│    └── permission\n\n│    │   ├── rule-chmod.yml\n\n│    │   └── test-chmod.py\n\n│    └── ...\n\n└── ...\n\n```\n\n\nIn addition to the rules, we also store mapping files (in the `mappings`\nsubdirectory). The mappings directory in this repository contains YAML\nconfiguration/mapping files that map native analyzer IDs to the\ncorresponding Semgrep rules. An analyzer ID uniquely identifies the type of\nfinding. The information in the mapping files helps us to correlate the\nfinding from the original analyzer with their corresponding Semgrep findings\nand vice versa.\n\n\nThe mapping files are digested by the testing framework to perform an\nautomated gap analysis. The goal of this analysis is to check if there is an\nunexpected deviation between Semgrep (with the rules in this repository) and\na given analyzer.\n\n\nA mapping file groups distinct rules into rule-sets and, thus, can be used\nto bundle different rules based on a certain domain. An excerpt from a\nmapping file is depicted below – it maps bandit rules (identified by bandit\nIDs) to Semgrep rules from the central rule repository.\n\n\n``` yaml\n\nbandit:\n  - id: \"B101\"\n    rules:\n      - \"python/assert/rule-assert_used\"\n  - id: \"B102\"\n    rules:\n      - \"python/exec/rule-exec_used\"\n  - id: \"B103\"\n    rules:\n      - \"python/file_permissions/rule-general_bad_permission\"\n  - id: \"B104\"\n    rules:\n      - \"python/bind_all_interfaces/rule-general_bindall_interfaces\"\n```\n\n\n#### How the rule testing framework works\n\n\nThe test-oracle/baseline is provided by the original analyzer when executed\non the test-files. The rules in the central rule repository are compared and\nevaluated against this baseline. The execution of the testing framework is\ntriggered by any change applied to the rule repository.\n\n\nWe run all analyzers (flawfinder, bandit, etc.) and their corresponding\nSemgrep rule-sets (as defined by the mapping files) on the test-files from\nthe GitLab rule repository. The resulting `gl-sast-reports.json` reports\nthat are produced by the original analyzer and by the Semgrep analyzer are\nthen compared in a pairwise manner. To identify identical findings in both\nreports, we leverage the information from the mapping files that maps the\nrule-ids of the baseline analyzer to the corresponding Semgrep rule-ids for\nthe rules stored in the central rule repository.\n\n\nAs output, we produce a gap analysis report (in markdown format). The gap\nanalysis lists all the findings that have been reported by the original\nanalyzers and groups them into different tables (based on the native\nrule-ids). 
The screenshot below shows a single table from the gap analysis\nreport.\n\n\n![Gap Analysis\nReport](https://about.gitlab.com/images/blogimages/testing-framework-report.png){:\n.shadow.center}\n\nAn example table from the gap analysis report.\n\n{: .note.text-center}\n\n\nThe `X` symbols indicate whether the analyzers (in the example, flawfinder\nand Semgrep) were able to detect a given finding. The concrete findings as\nwell as the rule files are linked in the table. To reach full coverage,\nflawfinder as well as Semgrep have to cover the same findings for all the\nrules that are reported by the baseline.\n\n\n#### The analyzer replacement\n\n\nTo build a Semgrep rule-set that is on par with the capabilities of the\noriginal/baseline analyzer we leveraged the newly created testing framework.\nFlawfinder, a C/C++ analyzer, was the first analyzer we fully migrated to\nSemgrep using the testing framework as a compass.\n\n\nFirst, we checked the flawfinder implementation to identify the implemented\nrules. Given that flawfinder is a Python script and that the rules are\nessentially stored in a dictionary/hash data-structure, we were able to\nsemi-automatically extract the rules and generate the corresponding Semgrep\nrule files. We were also able to source the test-files from the flawfinder\nsource code repository.\n\n\nAfter the initial import of the first set of rules-files and test-cases, we\nused the information provided by the testing-framework to see which rules\nneeded refinement.\n\n\nWe responded to the information provided by our testing framework in the\nfollowing way:\n\n\n1. Findings covered by Baseline and covered by our rule-set: Nothing to be\ndone.\n\n1. Findings covered by Baseline but not covered by our rule-set: This\ndenotes an incomplete ruleset. In this case we extended the rule-file by\nproviding additional `pattern` entries.\n\n1. Findings not covered by Baseline but covered by our rule-set: This\nusually denotes that some rules are too vaguely formulated. 
In this case, we\nrefined our rules by using exclusions, e.g., by using `pattern-not` or by\nadding more detail to an already existing pattern.\n\n\nThe rule design was an iterative process in which we closed the gaps between\nour Semgrep rule-set and the flawfinder baseline, using the testing framework\nas an oracle, until we ultimately achieved 100% parity.\n\n\n## How the GSoC project helped GitLab\n\n\nIn this GSoC project we successfully built an automated rule/configuration\ntesting framework that is driven by GitLab CI/CD capabilities and that\nprovided the data we needed to replace flawfinder reliably and quickly with\na corresponding Semgrep rule-set.\n\n\nIf you are interested in finding out more about this GSoC\nproject, please check out the following repositories:\n\n\n- [Central Rule\nRepository](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/playground/sast-rules)\n\n- [Testing\nFramework](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/rule-testing)\n\n- [Gap Analysis Computation\nTool](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/report-diff)\n\n- [Repository to track gap\nstatistics](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/rule-testing-stats)\n",[695,975,9],{"slug":1546,"featured":6,"template":699},"write-vulnerability-detection-rules","content:en-us:blog:write-vulnerability-detection-rules.yml","Write Vulnerability Detection Rules","en-us/blog/write-vulnerability-detection-rules.yml","en-us/blog/write-vulnerability-detection-rules",{"_path":1552,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1553,"content":1559,"config":1565,"_id":1567,"_type":13,"title":1568,"_source":15,"_file":1569,"_stem":1570,"_extension":18},"/en-us/blog/all-aboard-merge-trains",{"title":1554,"description":1555,"ogTitle":1554,"ogDescription":1555,"noIndex":6,"ogImage":1556,"ogUrl":1557,"ogSiteName":685,"ogType":686,"canonicalUrls":1557,"schema":1558},"How starting merge trains improve efficiency for DevOps","No more queuing and waiting for pipeline results! Read how merge trains will speed up your deployments while making sure master stays green.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678419/Blog/Hero%20Images/merge_trains.jpg","https://about.gitlab.com/blog/all-aboard-merge-trains","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How starting merge trains improve efficiency for DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-01-30\",\n      }",{"title":1554,"description":1555,"authors":1560,"heroImage":1556,"date":1562,"body":1563,"category":762,"tags":1564},[1561],"Orit Golowinski","2020-01-30","\nA large percentage of a developer's day is spent updating their branches and rebasing; they are essentially \"racing\" their teammates to get their merge requests merged. Keeping the master branch green is critical for [continuous delivery](/topics/continuous-delivery/). When the production build breaks, it means your new code isn't going live, which impacts users and revenue. The only way to be 100% sure the master branch stays green when new code merges is to run the pipeline using the latest version of the master branch. For teams that have a high volume of merges, this can be difficult or even impossible. In the time it takes the pipeline to complete one code change, other changes can get merged to master with the potential for conflict. The only way to mitigate this is to queue and sequence the changes so that once a production pipeline starts, other code doesn't get merged ahead of that change. \n\n## What are merge trains and how do they help?\n\nMerge trains introduce a way to order the flow of changes into the target branch (usually master). When teams have a high number of changes landing in the target branch, another change can get merged to master during the time it takes to validate the merged code for one change, invalidating the previous merged result.\n\nBy using merge trains, each merge request joins as the last item in that train, with each merge request being processed in order. However, instead of queuing and waiting, each item takes the completed state of the previous (pending) [merge ref](https://gitlab.com/gitlab-org/gitlab-foss/issues/47110) (the merged result of the changes ahead of it in the train), adds its own changes, and starts the pipeline immediately in parallel under the assumption that everything is going to pass.\n\nIf all pipelines in the merge train complete successfully, then no pipeline time is wasted on queuing or retrying. Pipelines invalidated through failures are immediately canceled, the MR causing the failure is removed, and the rest of the MRs in the train are requeued without the need for manual intervention.\n\n
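As a practical note, merge trains build on merge request pipelines, so the jobs in your `.gitlab-ci.yml` need to be configured to run for merge request events. A minimal illustration (the job name and script are placeholders):\n\n```yaml\ntest:\n  script: ./run-tests.sh\n  rules:\n    # Run this job in merge request pipelines, which merge trains rely on\n    - if: '$CI_PIPELINE_SOURCE == \"merge_request_event\"'\n```\n\n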
An example of a merge train:\n\n![Diagram of merge trains](https://about.gitlab.com/images/blogimages/merge_trains-1.png){: .shadow}\n\nMR1 and MR2 join a merge train. When MR3 attempts to join, the merge fails and it is removed from the merge train. MR4 restarts at the point where MR3 failed, and attempts to run without the contents of MR3.\nMR3 will remain open in a failed state, so that the author can rebase and fix the failure before attempting to merge again.\n\nHere is a demonstration video that explains the advantage of the merge train feature. In this video, we'll simulate the common problem in a workflow without merge trains, and later, we resolve the problem by enabling a merge train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## How the merge trains feature has evolved so far\n\nAfter releasing [merge trains](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains) in GitLab 12.0, we immediately started to use this feature internally and collected a lot of valuable feedback which helped us improve and enhance the feature.\n\nWe started by tuning the [merge train concurrency](https://gitlab.com/gitlab-org/gitlab/issues/31692). We understood that while merge trains are designed to improve efficiency by making sure that master stays green, they can also create an unwanted bottleneck that slows down productivity if your merge requests need to wait in a long queue in order to get merged.\n\nWe also noticed that many developers were \"skipping the line\" and merging their changes immediately because they did not understand the effect that merging immediately has on other users, so we added a [warning](https://gitlab.com/gitlab-org/gitlab/issues/12679) to clarify this common misunderstanding. 
We intentionally left the option to still \"merge immediately\" since we also understand the importance of an urgent merge request, such as a \"hot fix\" that must be able to skip to the front of the merge train. Another improvement was the ability to [“squash & merge” as part of the merge train](https://gitlab.com/gitlab-org/gitlab/issues/13001) in order to maintain a clean commit history.\n\nHere is a demonstration video that explains how squash & merge works with merge trains.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/pA5SfHwlq0s\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## What's next\n\nWe plan to add more important features to the support of merge trains. The first is that [merge trains should support fast-forward merge](https://gitlab.com/gitlab-org/gitlab/issues/35628). This could help solve a fundamental contention problem of fast-forward merges: The CI pipeline must be run every time the merge request is rebased, and the merge request must be rebased every time master changes – which is frequently! This problem significantly limits the frequency with which merge requests can be merged.\n\nThe second feature, [API support for merge trains](https://gitlab.com/gitlab-org/gitlab/issues/32665), will extend the ability to automate your workflows using merge trains.\n\nWe want to hear from you! Tell us how merge trains have improved your workflow, or give us more insight into how we can improve merge trains to work better for you. [Give us your feedback by commenting here](https://gitlab.com/groups/gitlab-org/-/epics/2408).\n\nCover image by [Vidar Nordli-Mathisen\n](https://images.unsplash.com/photo-1525349769815-0e6ba4e0bbdd?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1611&q=80) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[108,9,1132,869],{"slug":1566,"featured":6,"template":699},"all-aboard-merge-trains","content:en-us:blog:all-aboard-merge-trains.yml","All Aboard Merge Trains","en-us/blog/all-aboard-merge-trains.yml","en-us/blog/all-aboard-merge-trains",5,[678,704,728,750,770,789,812,832,854],1758662328090]